diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 92e6f56a932d..e597dde124b1 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1840,6 +1840,13 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 				goto xa_locked;
 			}
 			xas_store(&xas, hpage);
+			if (xas_error(&xas)) {
+				mapping->nrpages--;
+				shmem_uncharge(mapping->host, 1);
+				result = SCAN_FAIL;
+				pr_info("JUMPING\n");
+				goto xa_locked;
+			}
 			nr_none++;
 			continue;
 		}
@@ -2087,6 +2094,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		hpage = NULL;
 	} else {
 		struct page *page;
+		pr_info("Starting restoring...\n");
 
 		/* Something went wrong: roll back page cache changes */
 		xas_lock_irq(&xas);
@@ -2096,7 +2104,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		}
 
 		xas_set(&xas, start);
-		xas_for_each(&xas, page, end - 1) {
+		xas_for_each(&xas, page, index) {
 			page = list_first_entry_or_null(&pagelist,
 					struct page, lru);
 			if (!page || xas.xa_index < page->index) {
@@ -2124,18 +2132,21 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		xas_unlock_irq(&xas);
 
 		hpage->mapping = NULL;
+		pr_info("Next stage\n");
 	}
 
 	if (hpage)
 		unlock_page(hpage);
+	pr_info("Third stage...\n");
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
 	if (hpage) {
 		mem_cgroup_uncharge(page_folio(hpage));
 		put_page(hpage);
 	}
-
+	pr_info("4th stage...\n");
 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr,
 					  file, nr, result);
+	pr_info("exiting...\n");
 	return result;
 }
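
For reference on the first hunk: xas_store() does not return an error code; an
allocation failure for interior nodes (typically -ENOMEM) is latched into the
xa_state and has to be fetched with xas_error(), which is what the new check
does. The documented alternative is to retry the store via xas_nomem(). A
minimal sketch of that retry form, using a hypothetical store_entry() helper
(only XA_STATE(), xas_store(), xas_nomem() and xas_error() are the real XArray
API; the helper itself is illustrative, not part of this patch):

#include <linux/xarray.h>

/* Hypothetical helper, not part of the patch above. */
static int store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* allocate outside the lock, retry */

	return xas_error(&xas);	/* 0 on success, negative errno on failure */
}

The hunk instead unwinds the accounting by hand (mapping->nrpages--,
shmem_uncharge()), presumably undoing the charge taken just before the store,
and fails the collapse with SCAN_FAIL.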