diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cf97f3884fda..2a04a7ed6125 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -754,6 +754,7 @@ enum vm_fault_reason {
 	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
 	VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
 	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
+	VM_FAULT_USER           = (__force vm_fault_t)0x100000,
 };
 
 /* Encode hstate index for a hwpoisoned large page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e070b8593b37..8d587aba2658 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5476,7 +5476,7 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
 						  unsigned long addr,
 						  unsigned long reason)
 {
-	vm_fault_t ret;
+	vm_fault_t ret = VM_FAULT_USER;
 	u32 hash;
 	struct vm_fault vmf = {
 		.vma = vma,
@@ -5495,15 +5495,12 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
 
 	/*
 	 * hugetlb_fault_mutex and i_mmap_rwsem must be
-	 * dropped before handling userfault. Reacquire
-	 * after handling fault to make calling code simpler.
+	 * dropped before handling userfault.
 	 */
 	hash = hugetlb_fault_mutex_hash(mapping, idx);
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	i_mmap_unlock_read(mapping);
-	ret = handle_userfault(&vmf, reason);
-	i_mmap_lock_read(mapping);
-	mutex_lock(&hugetlb_fault_mutex_table[hash]);
+	ret |= handle_userfault(&vmf, reason);
 
 	return ret;
 }
@@ -5777,6 +5774,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (huge_pte_none_mostly(entry)) {
 		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
 				      entry, flags);
+
+		if (ret & VM_FAULT_USER) {
+			ret ^= VM_FAULT_USER;
+			goto user_fault;
+		}
+
 		goto out_mutex;
 	}
 
@@ -5885,6 +5888,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * page fault. So we are safe from accessing freed page, even if we wait
 	 * here without taking refcount.
 	 */
+user_fault:
 	if (need_wait_lock)
 		wait_on_page_locked(page);
 	return ret;
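
The sketch below is a minimal, user-space illustration of the flag-propagation pattern this patch uses: the callee starts its return value with a marker bit (VM_FAULT_USER, a bit above VM_FAULT_HINDEX_MASK so it cannot collide with existing codes), ORs in the real fault result, and the caller tests for the marker, strips it with XOR, and jumps to a path that skips re-taking the locks the callee already dropped. The standalone vm_fault_t typedef and the fake_* helpers are stand-ins for illustration only, not the kernel's definitions.

	/*
	 * Minimal user-space sketch of the flag-propagation pattern in the
	 * patch above.  Types and helpers are simplified stand-ins.
	 */
	#include <stdio.h>

	typedef unsigned int vm_fault_t;          /* stand-in for the kernel type */

	#define VM_FAULT_USER  ((vm_fault_t)0x100000)  /* "went to userfault" marker */

	/* Stand-in for handle_userfault(): returns an ordinary fault code. */
	static vm_fault_t fake_handle_userfault(void)
	{
		return 0;
	}

	/*
	 * Mirrors hugetlb_handle_userfault(): start with the marker bit set
	 * and OR in the real result, so OR preserves whatever the userfault
	 * handler returned while still telling the caller this path ran.
	 */
	static vm_fault_t fake_hugetlb_handle_userfault(void)
	{
		vm_fault_t ret = VM_FAULT_USER;

		/* the locks would be dropped here and NOT re-taken */
		ret |= fake_handle_userfault();
		return ret;
	}

	int main(void)
	{
		vm_fault_t ret = fake_hugetlb_handle_userfault();

		if (ret & VM_FAULT_USER) {
			ret ^= VM_FAULT_USER;  /* strip only the marker bit */
			/* caller skips unlocking: locks were already dropped */
			printf("userfault path, final code 0x%x\n", ret);
			return 0;
		}

		/* normal path: caller still holds the locks and releases them */
		printf("normal path, final code 0x%x\n", ret);
		return 0;
	}

Because 0x100000 sits outside the range used by the existing VM_FAULT_* codes (VM_FAULT_HINDEX_MASK ends at 0x0f0000), the XOR removes only the marker and leaves the underlying fault result intact for the caller to return.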