diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fc9784544b24..921301fddb8d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -430,7 +430,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
 }
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty, bool locked)
+		bool compound, bool young, bool dirty, bool locked, bool migration_entry)
 {
 	int i, nr = compound ? compound_nr(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -457,8 +457,13 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 	 * page_count(page) == 1 guarantees the page is mapped exactly once.
 	 * If any subpage of the compound page mapped with PTE it would elevate
 	 * page_count().
+	 *
+	 * DIRTY HACK: count migration entry as having a single mapping instead
+	 * of trying to figure out the actual mapcount, because page_mapcount()
+	 * will fall on its face if the page is a compound page in the middle
+	 * of being split.
 	 */
-	if (page_count(page) == 1) {
+	if (page_count(page) == 1 || migration_entry) {
 		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
 			locked, true);
 		return;
@@ -495,6 +500,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
+	bool migration_entry = false;
 
 	if (pte_present(*pte)) {
 		page = vm_normal_page(vma, addr, *pte);
@@ -514,9 +520,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 			} else {
 				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
 			}
-		} else if (is_migration_entry(swpent))
-			page = migration_entry_to_page(swpent);
-		else if (is_device_private_entry(swpent))
+		} else if (is_migration_entry(swpent)) {
+			page = migration_entry_to_page(swpent);
+			migration_entry = true;
+		} else if (is_device_private_entry(swpent))
 			page = device_private_entry_to_page(swpent);
 	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
 							&& pte_none(*pte))) {
@@ -530,7 +537,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked, migration_entry);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +548,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
+	bool migration_entry = false;
 
 	if (pmd_present(*pmd)) {
 		/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -548,8 +556,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
 
-		if (is_migration_entry(entry))
-			page = migration_entry_to_page(entry);
+		if (is_migration_entry(entry)) {
+			page = migration_entry_to_page(entry);
+			migration_entry = true;
+		}
 	}
 	if (IS_ERR_OR_NULL(page))
 		return;
@@ -561,7 +571,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		/* pass */;
 	else
 		mss->file_thp += HPAGE_PMD_SIZE;
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked, migration_entry);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
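
For illustration only, the accounting decision this patch changes can be sketched as a
stand-alone userspace program. This is a minimal model of the patched control flow in
smaps_account(), not kernel code: struct fake_page, pss_for() and their fields are
hypothetical stand-ins for struct page, page_count()/page_mapcount() and mem_size_stats,
and a 4 KiB page size is assumed. The point is simply that a migration entry is charged
as a single mapping, so the (possibly unstable) mapcount is never read.

/*
 * Userspace model of the patched smaps_account() decision, for illustration
 * only.  All names here (fake_page, pss_for) are hypothetical stand-ins; the
 * real kernel code operates on struct page and struct mem_size_stats.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE 4096UL	/* assumed page size */
#define PSS_SHIFT 12		/* mirrors the fixed-point shift used by task_mmu.c */

struct fake_page {
	int refcount;	/* models page_count(page) */
	int mapcount;	/* models page_mapcount(page); unreliable mid-split */
};

/* Returns the PSS (in fixed point, << PSS_SHIFT) charged for one page. */
static uint64_t pss_for(const struct fake_page *page, bool migration_entry)
{
	/*
	 * Patched logic: a migration entry is charged as if the page were
	 * mapped exactly once, so the (possibly bogus) mapcount is never read.
	 */
	if (page->refcount == 1 || migration_entry)
		return (uint64_t)MODEL_PAGE_SIZE << PSS_SHIFT;

	/* Shared page: split the cost across the number of mappers. */
	return ((uint64_t)MODEL_PAGE_SIZE << PSS_SHIFT) / page->mapcount;
}

int main(void)
{
	struct fake_page shared = { .refcount = 3, .mapcount = 3 };

	printf("normal shared page : PSS = %llu bytes\n",
	       (unsigned long long)(pss_for(&shared, false) >> PSS_SHIFT));
	printf("migration entry    : PSS = %llu bytes\n",
	       (unsigned long long)(pss_for(&shared, true) >> PSS_SHIFT));
	return 0;
}

In this sketch the shared page's cost is divided across its three mappers, while the same
page referenced through a migration entry is charged in full, which is the behavioural
difference the migration_entry flag introduces in the kernel's accounting.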