--- x/mm/hugetlb.c
+++ h/mm/hugetlb.c
@@ -5575,7 +5575,7 @@ static vm_fault_t hugetlb_no_page(struct
 			struct vm_area_struct *vma,
 			struct address_space *mapping, pgoff_t idx,
 			unsigned long address, pte_t *ptep,
-			pte_t old_pte, unsigned int flags)
+			pte_t old_pte, unsigned int flags, struct mutex *flt_mutex)
 {
 	struct hstate *h = hstate_vma(vma);
 	vm_fault_t ret = VM_FAULT_SIGBUS;
@@ -5768,6 +5768,7 @@ static vm_fault_t hugetlb_no_page(struct
 	unlock_page(page);
 out:
 	hugetlb_vma_unlock_read(vma);
+	BUG_ON(flt_mutex != &hugetlb_fault_mutex_table[hash]);
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	return ret;
 
@@ -5820,6 +5821,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
 	struct address_space *mapping;
 	int need_wait_lock = 0;
 	unsigned long haddr = address & huge_page_mask(h);
+	struct mutex *flt_mutex;
 
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (ptep) {
@@ -5846,6 +5848,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
 	idx = vma_hugecache_offset(h, vma, haddr);
 	hash = hugetlb_fault_mutex_hash(mapping, idx);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
+	flt_mutex = &hugetlb_fault_mutex_table[hash];
 
 	/*
	 * Acquire vma lock before calling huge_pte_alloc and hold
@@ -5872,7 +5875,7 @@ vm_fault_t hugetlb_fault(struct mm_struc
	 * mutex internally, which make us return immediately.
	 */
	return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
-			       entry, flags);
+			       entry, flags, flt_mutex);
 
 	ret = 0;
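
The BUG_ON added above is a sanity check: hugetlb_no_page() recomputes the
table slot from hash and unlocks that, so if the hash inputs (mapping, idx)
ever diverged from what hugetlb_fault() used when locking, the wrong mutex
would be released. Passing flt_mutex down lets the callee assert it is
unlocking the exact mutex the caller locked. Below is a minimal userspace
sketch of the same pattern, not kernel code; fault_mutex_table,
fault_mutex_hash, handle_fault and handle_no_page are illustrative stand-ins
(compile with -pthread):

	#include <assert.h>
	#include <pthread.h>

	#define TABLE_SIZE 8

	static pthread_mutex_t fault_mutex_table[TABLE_SIZE];

	/* Stand-in for hugetlb_fault_mutex_hash(): maps a fault index to a slot. */
	static unsigned long fault_mutex_hash(unsigned long idx)
	{
		return idx % TABLE_SIZE;
	}

	/*
	 * Callee: recomputes the slot, but asserts it matches the mutex the
	 * caller actually locked before unlocking -- the userspace analogue
	 * of the BUG_ON in the patch.
	 */
	static void handle_no_page(unsigned long idx, pthread_mutex_t *flt_mutex)
	{
		unsigned long hash = fault_mutex_hash(idx);

		/* ... fault-handling work would go here ... */

		assert(flt_mutex == &fault_mutex_table[hash]);
		pthread_mutex_unlock(&fault_mutex_table[hash]);
	}

	/* Caller: locks a slot and passes down the exact mutex it locked. */
	static void handle_fault(unsigned long idx)
	{
		unsigned long hash = fault_mutex_hash(idx);

		pthread_mutex_lock(&fault_mutex_table[hash]);
		handle_no_page(idx, &fault_mutex_table[hash]);
	}

	int main(void)
	{
		for (int i = 0; i < TABLE_SIZE; i++)
			pthread_mutex_init(&fault_mutex_table[i], NULL);
		handle_fault(42);
		return 0;
	}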