diff --git a/mm/filemap.c b/mm/filemap.c
index c943d1b90cc2..9ff9a885e9bf 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2039,7 +2039,6 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
  * @start:	The starting page cache index
  * @end:	The final page index (inclusive).
  * @fbatch:	Where the resulting entries are placed.
- * @indices:	The cache indices corresponding to the entries in @entries
  *
  * find_get_entries() will search for and return a batch of entries in
  * the mapping.  The entries are placed in @fbatch.  find_get_entries()
@@ -2052,21 +2051,31 @@ static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
  * shmem/tmpfs, are included in the returned array.
  *
  * Return: The number of entries which were found.
+ * Also updates @start to be the index just past the last entry found.
  */
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
+unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
+		pgoff_t end, struct folio_batch *fbatch)
 {
-	XA_STATE(xas, &mapping->i_pages, start);
+	XA_STATE(xas, &mapping->i_pages, *start);
+	unsigned long nr;
 	struct folio *folio;
 
 	rcu_read_lock();
 	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
-		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
 	}
 	rcu_read_unlock();
+	nr = folio_batch_count(fbatch);
 
+	if (nr) {
+		folio = fbatch->folios[nr - 1];
+		nr = folio_nr_pages(folio);
+
+		if (folio_test_hugetlb(folio))
+			nr = 1;
+		*start = folio->index + nr;
+	}
 	return folio_batch_count(fbatch);
 }
 
@@ -2076,7 +2085,6 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
  * @start:	The starting page cache index.
  * @end:	The final page index (inclusive).
  * @fbatch:	Where the resulting entries are placed.
- * @indices:	The cache indices of the entries in @fbatch.
  *
  * find_lock_entries() will return a batch of entries from @mapping.
  * Swap, shadow and DAX entries are included.  Folios are returned
@@ -2089,17 +2097,19 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
  * locked or folios under writeback.
  *
  * Return: The number of entries which were found.
+ * Also updates @start to be the index just past the last entry found.
  */
-unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
+unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
+		pgoff_t end, struct folio_batch *fbatch)
 {
-	XA_STATE(xas, &mapping->i_pages, start);
+	XA_STATE(xas, &mapping->i_pages, *start);
+	unsigned long nr;
 	struct folio *folio;
 
 	rcu_read_lock();
 	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
 		if (!xa_is_value(folio)) {
-			if (folio->index < start)
+			if (folio->index < *start)
 				goto put;
 			if (folio->index + folio_nr_pages(folio) - 1 > end)
 				goto put;
@@ -2111,7 +2121,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
 					folio);
 		}
-		indices[fbatch->nr] = xas.xa_index;
 		if (!folio_batch_add(fbatch, folio))
 			break;
 		continue;
@@ -2121,7 +2130,16 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 		folio_put(folio);
 	}
 	rcu_read_unlock();
+	nr = folio_batch_count(fbatch);
 
+	if (nr) {
+		folio = fbatch->folios[nr - 1];
+		nr = folio_nr_pages(folio);
+
+		if (folio_test_hugetlb(folio))
+			nr = 1;
+		*start = folio->index + nr;
+	}
 	return folio_batch_count(fbatch);
 }
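
The *start update tail is now duplicated verbatim at the end of find_get_entries() and
find_lock_entries().  If that duplication ever becomes a nuisance, it could be factored into a
small static helper; a minimal sketch, using the hypothetical name folio_batch_next_index() and
assuming (as the open-coded tails above do) that the last batch entry is a real folio rather
than an xa_is_value() entry:

static pgoff_t folio_batch_next_index(struct folio_batch *fbatch)
{
	struct folio *folio = fbatch->folios[folio_batch_count(fbatch) - 1];

	/*
	 * hugetlb folios are indexed in huge-page-sized units in the page
	 * cache, so the entry after a hugetlb folio is at index + 1 rather
	 * than index + folio_nr_pages().
	 */
	if (folio_test_hugetlb(folio))
		return folio->index + 1;
	return folio->index + folio_nr_pages(folio);
}

With such a helper, each tail would reduce to:

	if (folio_batch_count(fbatch))
		*start = folio_batch_next_index(fbatch);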
 
diff --git a/mm/internal.h b/mm/internal.h
index 785409805ed7..ccce1369702c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -104,10 +104,10 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 	force_page_cache_ra(&ractl, nr_to_read);
 }
 
-unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
+unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
+		pgoff_t end, struct folio_batch *fbatch);
+unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
+		pgoff_t end, struct folio_batch *fbatch);
 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
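
For reference, the new prototypes imply the caller pattern used by the mm/shmem.c and
mm/truncate.c hunks below: the index is passed by address and advanced by the callee, so
callers no longer recompute it from an indices[] array.  A condensed, illustrative sketch
(end is exclusive here, matching those call sites; batch processing is elided):

	struct folio_batch fbatch;
	pgoff_t index = start;

	folio_batch_init(&fbatch);
	while (index < end && find_lock_entries(mapping, &index, end - 1,
			&fbatch)) {
		/*
		 * Process the batch; on return from find_lock_entries(),
		 * index already sits just past the last entry found, so no
		 * per-entry "index = indices[i]" bookkeeping is needed.
		 */
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
		cond_resched();
	}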
diff --git a/mm/shmem.c b/mm/shmem.c
index 42e5888bf84d..3341b47593ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -917,7 +917,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
 	struct folio_batch fbatch;
-	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio *folio;
 	bool same_folio;
 	long nr_swaps_freed = 0;
@@ -932,21 +931,18 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 
 	folio_batch_init(&fbatch);
 	index = start;
-	while (index < end && find_lock_entries(mapping, index, end - 1,
-			&fbatch, indices)) {
+	while (index < end && find_lock_entries(mapping, &index, end - 1,
+			&fbatch)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			folio = fbatch.folios[i];
 
-			index = indices[i];
-
 			if (xa_is_value(folio)) {
 				if (unfalloc)
 					continue;
 				nr_swaps_freed += !shmem_free_swap(mapping,
-								index, folio);
+							folio->index, folio);
 				continue;
 			}
-			index += folio_nr_pages(folio) - 1;
 
 			if (!unfalloc || !folio_test_uptodate(folio))
 				truncate_inode_folio(mapping, folio);
@@ -955,7 +951,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
-		index++;
 	}
 
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
@@ -987,8 +982,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end) {
 		cond_resched();
 
-		if (!find_get_entries(mapping, index, end - 1, &fbatch,
-				indices)) {
+		if (!find_get_entries(mapping, &index, end - 1, &fbatch)) {
 			/* If all gone or hole-punch or unfalloc, we're done */
 			if (index == start || end != -1)
 				break;
@@ -999,13 +993,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			folio = fbatch.folios[i];
 
-			index = indices[i];
 			if (xa_is_value(folio)) {
 				if (unfalloc)
 					continue;
-				if (shmem_free_swap(mapping, index, folio)) {
+				if (shmem_free_swap(mapping, folio->index, folio)) {
 					/* Swap was replaced by page: retry */
-					index--;
+					index = folio->index;
 					break;
 				}
 				nr_swaps_freed++;
@@ -1018,19 +1011,17 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				if (folio_mapping(folio) != mapping) {
 					/* Page was replaced by swap: retry */
 					folio_unlock(folio);
-					index--;
+					index = folio->index;
 					break;
 				}
 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
 						folio);
 				truncate_inode_folio(mapping, folio);
 			}
-			index = folio->index + folio_nr_pages(folio) - 1;
 			folio_unlock(folio);
 		}
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
-		index++;
 	}
 
 	spin_lock_irq(&info->lock);
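
One behavioural detail worth spelling out: the old code retried a racing entry with index--,
relying on the trailing index++ at the bottom of the loop.  With find_get_entries() advancing
@index past the whole batch, a retry instead rewinds @index to the entry that must be examined
again, which is what the "index = folio->index" assignments above do.  A condensed sketch of
the resulting loop shape (needs_retry() is a hypothetical stand-in for the shmem_free_swap()
failure and folio_mapping() checks in the real hunk):

	while (index < end) {
		cond_resched();
		if (!find_get_entries(mapping, &index, end - 1, &fbatch))
			break;	/* real code also handles hole-punch/unfalloc */

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			folio = fbatch.folios[i];
			if (needs_retry(folio)) {
				/* rewind so the next search revisits this entry */
				index = folio->index;
				break;
			}
			/* ... free the swap entry or truncate the folio ... */
		}
		folio_batch_remove_exceptionals(&fbatch);
		folio_batch_release(&fbatch);
	}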
diff --git a/mm/truncate.c b/mm/truncate.c
index 0b0708bf935f..8652abfe9c84 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -58,7 +58,7 @@ static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
  * exceptional entries similar to what folio_batch_remove_exceptionals() does.
  */
 static void truncate_folio_batch_exceptionals(struct address_space *mapping,
-				struct folio_batch *fbatch, pgoff_t *indices)
+				struct folio_batch *fbatch)
 {
 	int i, j;
 	bool dax;
@@ -82,7 +82,6 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 
 	for (i = j; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
-		pgoff_t index = indices[i];
 
 		if (!xa_is_value(folio)) {
 			fbatch->folios[j++] = folio;
@@ -90,11 +89,11 @@ static void truncate_folio_batch_exceptionals(struct address_space *mapping,
 		}
 
 		if (unlikely(dax)) {
-			dax_delete_mapping_entry(mapping, index);
+			dax_delete_mapping_entry(mapping, folio->index);
 			continue;
 		}
 
-		__clear_shadow_entry(mapping, index, folio);
+		__clear_shadow_entry(mapping, folio->index, folio);
 	}
 
 	if (!dax) {
@@ -333,7 +332,6 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	pgoff_t		start;		/* inclusive */
 	pgoff_t		end;		/* exclusive */
 	struct folio_batch fbatch;
-	pgoff_t		indices[PAGEVEC_SIZE];
 	pgoff_t		index;
 	int		i;
 	struct folio	*folio;
@@ -361,10 +359,9 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
 	folio_batch_init(&fbatch);
 	index = start;
-	while (index < end && find_lock_entries(mapping, index, end - 1,
-			&fbatch, indices)) {
-		index = indices[folio_batch_count(&fbatch) - 1] + 1;
-		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+	while (index < end && find_lock_entries(mapping, &index, end - 1,
+			&fbatch)) {
+		truncate_folio_batch_exceptionals(mapping, &fbatch);
 		for (i = 0; i < folio_batch_count(&fbatch); i++)
 			truncate_cleanup_folio(fbatch.folios[i]);
 		delete_from_page_cache_batch(mapping, &fbatch);
@@ -401,8 +398,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	index = start;
 	while (index < end) {
 		cond_resched();
-		if (!find_get_entries(mapping, index, end - 1, &fbatch,
-				indices)) {
+		if (!find_get_entries(mapping, &index, end - 1, &fbatch)) {
 			/* If all gone from start onwards, we're done */
 			if (index == start)
 				break;
@@ -415,21 +411,18 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing page->index */
-			index = indices[i];
-
 			if (xa_is_value(folio))
 				continue;
 
 			folio_lock(folio);
-			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, folio->index),
+					folio);
 			folio_wait_writeback(folio);
 			truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
-			index = folio_index(folio) + folio_nr_pages(folio) - 1;
 		}
-		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
+		truncate_folio_batch_exceptionals(mapping, &fbatch);
 		folio_batch_release(&fbatch);
-		index++;
 	}
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
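
Both second-pass loops (here and in shmem_undo_range()) keep their "if (index == start) break;"
logic, which now relies on find_get_entries() leaving *start untouched when it finds nothing;
only a non-empty batch moves the index forward.  An illustrative check of that contract
(WARN_ON()s for exposition only, not something to merge; the second check assumes the last
entry in the batch is a real folio):

	pgoff_t index = start;
	unsigned int nr;

	nr = find_get_entries(mapping, &index, end - 1, &fbatch);
	if (!nr)
		WARN_ON(index != start);	/* empty batch: index unchanged */
	else
		WARN_ON(index <= fbatch.folios[nr - 1]->index);	/* must advance */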
@@ -502,7 +495,6 @@ EXPORT_SYMBOL(truncate_inode_pages_final);
 unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio_batch fbatch;
 	pgoff_t index = start;
 	unsigned long ret;
@@ -510,20 +502,18 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 	int i;
 
 	folio_batch_init(&fbatch);
-	while (find_lock_entries(mapping, index, end, &fbatch, indices)) {
+	while (find_lock_entries(mapping, &index, end, &fbatch)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing folio->index */
-			index = indices[i];
 
 			if (xa_is_value(folio)) {
 				count += invalidate_exceptional_entry(mapping,
-								      index,
-								      folio);
+								  folio->index,
+								  folio);
 				continue;
 			}
-			index += folio_nr_pages(folio) - 1;
 
 			ret = mapping_evict_folio(mapping, folio);
 			folio_unlock(folio);
@@ -542,7 +532,6 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
-		index++;
 	}
 	return count;
 }
@@ -628,7 +617,6 @@ static int folio_launder(struct address_space *mapping, struct folio *folio)
 int invalidate_inode_pages2_range(struct address_space *mapping,
 				  pgoff_t start, pgoff_t end)
 {
-	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio_batch fbatch;
 	pgoff_t index;
 	int i;
@@ -641,16 +629,14 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 	folio_batch_init(&fbatch);
 	index = start;
-	while (find_get_entries(mapping, index, end, &fbatch, indices)) {
+	while (find_get_entries(mapping, &index, end, &fbatch)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing folio->index */
-			index = indices[i];
-
 			if (xa_is_value(folio)) {
 				if (!invalidate_exceptional_entry2(mapping,
-						index, folio))
+						folio->index, folio))
 					ret = -EBUSY;
 				continue;
 			}
@@ -660,13 +646,14 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				 * If folio is mapped, before taking its lock,
 				 * zap the rest of the file in one hit.
 				 */
-				unmap_mapping_pages(mapping, index,
-						(1 + end - index), false);
+				unmap_mapping_pages(mapping, folio->index,
+					(1 + end - folio->index), false);
 				did_range_unmap = 1;
 			}
 
 			folio_lock(folio);
-			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, folio->index),
+					folio);
 			if (folio->mapping != mapping) {
 				folio_unlock(folio);
 				continue;
@@ -689,7 +676,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
-		index++;
 	}
 	/*
 	 * For DAX we invalidate page tables after invalidating page cache.  We