--- x/mm/vma.c
+++ y/mm/vma.c
@@ -1263,11 +1263,12 @@ static void vms_complete_munmap_vmas(str
 	mm = current->mm;
 	mm->map_count -= vms->vma_count;
 	mm->locked_vm -= vms->locked_vm;
-	if (vms->unlock)
-		mmap_write_downgrade(mm);
 
-	if (!vms->nr_pages)
+	if (!vms->nr_pages) {
+		if (vms->unlock)
+			mmap_write_unlock(mm);
 		return;
+	}
 
 	vms_clear_ptes(vms, mas_detach, !vms->unlock);
 	/* Update high watermark before we lower total_vm */
@@ -1284,13 +1285,15 @@ static void vms_complete_munmap_vmas(str
 
 	/* Remove and clean up vmas */
 	mas_set(mas_detach, 0);
-	mas_for_each(mas_detach, vma, ULONG_MAX)
+	mas_for_each(mas_detach, vma, ULONG_MAX) {
+		vma_start_write(vma);
 		remove_vma(vma);
+	}
 
 	vm_unacct_memory(vms->nr_accounted);
 	validate_mm(mm);
 	if (vms->unlock)
-		mmap_read_unlock(mm);
+		mmap_write_unlock(mm);
 
 	__mt_destroy(mas_detach->tree);
 }
--- x/mm/gup.c
+++ y/mm/gup.c
@@ -1419,6 +1419,7 @@ static long __get_user_pages(struct mm_s
 	long ret = 0, i = 0;
 	struct vm_area_struct *vma = NULL;
 	struct follow_page_context ctx = { NULL };
+	int vma_locked = 0;
 
 	if (!nr_pages)
 		return 0;
@@ -1453,7 +1454,13 @@ static long __get_user_pages(struct mm_s
 			}
 			goto retry;
 		}
-		vma = gup_vma_lookup(mm, start);
+		if (vma_locked) {
+			vma_end_read(vma);
+			vma_locked = 0;
+		}
+		vma = lock_vma_under_rcu(mm, start);
+		if (vma)
+			vma_locked = 1;
 		if (!vma && in_gate_area(mm, start)) {
 			ret = get_gate_page(mm, start & PAGE_MASK,
 					gup_flags, &vma,
@@ -1566,6 +1573,8 @@ next_page:
 		nr_pages -= page_increm;
 	} while (nr_pages);
 out:
+	if (vma && vma_locked)
+		vma_end_read(vma);
 	if (ctx.pgmap)
 		put_dev_pagemap(ctx.pgmap);
 	return i ? i : ret;
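
Note (not part of the patch): the mm/gup.c hunks move the VMA lookup from gup_vma_lookup(), which assumes the mmap_lock is held, to lock_vma_under_rcu(), which takes only the per-VMA read lock; the mm/vma.c hunk correspondingly write-locks each detached VMA via vma_start_write() before remove_vma() so such lockless readers cannot race with teardown. A minimal sketch of the usual caller-side shape of this API, as seen in the page-fault fast path; the lock_mmap/done labels and surrounding control flow here are illustrative, not taken from this patch:

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;		/* contended: fall back to mmap_lock */

	/* ... operate on vma under its per-VMA read lock ... */

	vma_end_read(vma);		/* drop the per-VMA lock when done */
	goto done;

lock_mmap:
	mmap_read_lock(mm);
	vma = vma_lookup(mm, address);
	/* ... operate on vma under the mmap_lock ... */
	mmap_read_unlock(mm);
done:

Unlike that fallback pattern, the patched __get_user_pages() relies on lock_vma_under_rcu() alone and tracks the held lock in vma_locked, releasing it before each re-lookup and at the out: exit.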