diff --git a/mm/memory.c b/mm/memory.c
index 1f18ed4a5497..8939357f1509 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -425,6 +425,8 @@ void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_rmb() barriers in page table walking code.
+	 *
+	 * See __pte_offset_map() for the smp_rmb() at the pte level.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
	pmd_populate(mm, pmd, *pte);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 4fcd959dcc4d..3330b666e9c3 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -297,6 +297,11 @@ pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
		pmd_clear_bad(pmd);
		goto nomap;
	}
+	/*
+	 * Pair with the smp_wmb() in pmd_install(): make sure that the
+	 * page table lock and page table contents are visibly initialized.
+	 */
+	smp_rmb();
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
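
For readers less familiar with the barrier pairing this patch completes, here is a
minimal userspace sketch (not part of the patch) of the same publish/consume
pattern: a writer fully initializes a structure before publishing a pointer to
it, and a lockless reader orders its load of the pointer before its loads of
the contents. C11 fences stand in for the kernel's smp_wmb()/smp_rmb(); all
names below (page_table, pmd_slot, etc.) are illustrative only.

/* Compile with: cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct page_table { int entries[4]; };        /* stands in for the pte page */

static struct page_table table;
static _Atomic(struct page_table *) pmd_slot; /* stands in for the pmd entry */

static void *writer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 4; i++)           /* initialize contents first */
		table.entries[i] = i + 1;
	/* analogue of smp_wmb() in pmd_install(): order the initializing
	 * stores before the store that publishes the pointer */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&pmd_slot, &table, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	struct page_table *pt;

	while (!(pt = atomic_load_explicit(&pmd_slot, memory_order_relaxed)))
		;                             /* spin until published */
	/* analogue of smp_rmb() in __pte_offset_map(): order the load of
	 * the slot before the loads of what it points to */
	atomic_thread_fence(memory_order_acquire);
	for (int i = 0; i < 4; i++)
		printf("entry %d = %d\n", i, pt->entries[i]);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

The standalone fences (rather than a store-release/load-acquire on pmd_slot
itself) are chosen only to mirror the shape of the kernel code, where
smp_wmb() and smp_rmb() are likewise separate barriers placed between the
accesses they order.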