--- x/mm/hugetlb.c
+++ y/mm/hugetlb.c
@@ -4923,6 +4923,7 @@ int copy_hugetlb_page_range(struct mm_st
 					src_vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
 		vma_assert_write_locked(src_vma);
+		hugetlb_vma_lock_write(src_vma);
 		raw_write_seqcount_begin(&src->write_protect_seq);
 	} else {
 		/*
@@ -5084,6 +5085,7 @@ again:
 
 	if (cow) {
 		raw_write_seqcount_end(&src->write_protect_seq);
+		hugetlb_vma_unlock_write(src_vma);
 		mmu_notifier_invalidate_range_end(&range);
 	} else {
 		hugetlb_vma_unlock_read(src_vma);
--- x/mm/rmap.c
+++ y/mm/rmap.c
@@ -1946,6 +1946,8 @@ static bool try_to_migrate_one(struct fo
 		hsz = huge_page_size(hstate_vma(vma));
 	}
 	mmu_notifier_invalidate_range_start(&range);
+	if (vma->vm_file)
+		filemap_invalidate_lock(vma->vm_file->f_mapping);
 
 	while (page_vma_mapped_walk(&pvmw)) {
 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
@@ -2200,6 +2202,8 @@ static bool try_to_migrate_one(struct fo
 			folio_put(folio);
 	}
 
+	if (vma->vm_file)
+		filemap_invalidate_unlock(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(&range);
 
 	return ret;