diff --git a/mm/memory.c b/mm/memory.c
index 6c264d2f969c..d816763777d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4331,20 +4331,26 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
-	pte_t entry;
+	pte_t entry[nr];
+	int i;
+	struct page *p = page;
 
 	flush_icache_pages(vma, page, nr);
-	entry = mk_pte(page, vma->vm_page_prot);
-
-	if (prefault && arch_wants_old_prefaulted_pte())
-		entry = pte_mkold(entry);
-	else
-		entry = pte_sw_mkyoung(entry);
-
-	if (write)
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (unlikely(uffd_wp))
-		entry = pte_mkuffd_wp(entry);
+
+	for (i = 0;i < nr;i++) {
+		entry[i] = mk_pte(p, vma->vm_page_prot);
+
+		if (prefault && arch_wants_old_prefaulted_pte())
+			entry[i] = pte_mkold(entry[i]);
+		else
+			entry[i] = pte_sw_mkyoung(entry[i]);
+
+		if (write)
+			entry[i] = maybe_mkwrite(pte_mkdirty(entry[i]), vma);
+		if (unlikely(uffd_wp))
+			entry[i] = pte_mkuffd_wp(entry[i]);
+		p++;
+	}
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
@@ -4355,7 +4361,8 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
 		folio_add_file_rmap_range(folio, page, nr, vma, false);
 	}
-	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
+	for (i = 0;i < nr;i++)
+		set_ptes(vma->vm_mm, addr, vmf->pte, entry[i], 1);
 
 	/* no need to invalidate: a not-present page won't be cached */
 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
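
For context, the generic set_ptes() helper in include/linux/pgtable.h installs nr consecutive PTEs starting at the given pte slot, incrementing the pfn for each successive entry, so the single batched call being replaced covered the whole range at once. The per-entry loop in the second hunk passes the same addr and vmf->pte on every iteration; a minimal sketch of a per-entry variant that also steps the address and pte slot (illustrative only, not part of the patch above) would look like:

	/* hypothetical per-entry form: advance addr and pte slot each step */
	for (i = 0; i < nr; i++)
		set_ptes(vma->vm_mm, addr + i * PAGE_SIZE, vmf->pte + i,
			 entry[i], 1);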