diff --git a/mm/readahead.c b/mm/readahead.c
index 3a4b5d58eeb6..95718f87bd43 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -467,7 +467,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	struct address_space *mapping = ractl->mapping;
 	pgoff_t start = readahead_index(ractl);
 	pgoff_t index = start;
-	unsigned int min_order = mapping_min_folio_order(mapping);
+	unsigned int min_order;
 	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
 	pgoff_t mark = index + ra->size - ra->async_size;
 	unsigned int nofs;
@@ -483,15 +483,22 @@ void page_cache_ra_order(struct readahead_control *ractl,
 
 	limit = min(limit, index + ra->size - 1);
 
+	/* See comment in page_cache_ra_unbounded() */
+	nofs = memalloc_nofs_save();
+	filemap_invalidate_lock_shared(mapping);
+
+	/*
+	 * Re-read min_order after acquiring the invalidate_lock to avoid a
+	 * race with set_blocksize() which can change the mapping's min_order
+	 * while holding the invalidate_lock exclusively.
+	 */
+	min_order = mapping_min_folio_order(mapping);
 	new_order = min(mapping_max_folio_order(mapping), new_order);
 	new_order = min_t(unsigned int, new_order, ilog2(ra->size));
 	new_order = max(new_order, min_order);
 
 	ra->order = new_order;
 
-	/* See comment in page_cache_ra_unbounded() */
-	nofs = memalloc_nofs_save();
-	filemap_invalidate_lock_shared(mapping);
 	/*
 	 * If the new_order is greater than min_order and index is
 	 * already aligned to new_order, then this will be noop as index
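
For readers less familiar with the bug shape this patch closes, below is a minimal userspace sketch of the same pattern in plain C with a pthread rwlock standing in for the mapping's invalidate_lock. It is an illustrative analogy only, not kernel code: the names mapping_demo, readahead_buggy, readahead_fixed and set_blocksize_demo are invented for this sketch, and it does not model memalloc_nofs_save() or the real filemap_invalidate_lock_shared() semantics. The point it demonstrates is the one in the patch's new comment: a value sampled before taking the shared lock can be stale by the time the critical section runs, so it must be (re-)read under the lock.

/* Illustrative analogy of the min_order race; not kernel code.
 * Build with: cc -o blocksize_race blocksize_race.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

/* Stand-in for struct address_space: min_order is guarded by an rwlock,
 * mirroring how the mapping's min folio order is stable only while the
 * invalidate_lock is held.
 */
struct mapping_demo {
	pthread_rwlock_t invalidate_lock;
	unsigned int min_order;
};

/* Buggy shape (before the patch): min_order is sampled before taking the
 * shared lock, so a concurrent writer can change it in the window between
 * the read and the lock acquisition.
 */
static unsigned int readahead_buggy(struct mapping_demo *m)
{
	unsigned int min_order = m->min_order;	/* racy: read outside the lock */

	pthread_rwlock_rdlock(&m->invalidate_lock);
	/* ... min_order may already be stale inside the critical section ... */
	pthread_rwlock_unlock(&m->invalidate_lock);
	return min_order;
}

/* Fixed shape (after the patch): take the shared lock first, then read
 * min_order, so it cannot change for the duration of the critical section.
 */
static unsigned int readahead_fixed(struct mapping_demo *m)
{
	unsigned int min_order;

	pthread_rwlock_rdlock(&m->invalidate_lock);
	min_order = m->min_order;	/* stable while the shared lock is held */
	pthread_rwlock_unlock(&m->invalidate_lock);
	return min_order;
}

/* Stand-in for set_blocksize(): updates min_order while holding the lock
 * exclusively, as the patch's comment describes.
 */
static void set_blocksize_demo(struct mapping_demo *m, unsigned int order)
{
	pthread_rwlock_wrlock(&m->invalidate_lock);
	m->min_order = order;
	pthread_rwlock_unlock(&m->invalidate_lock);
}

int main(void)
{
	struct mapping_demo m = {
		.invalidate_lock = PTHREAD_RWLOCK_INITIALIZER,
		.min_order = 0,
	};

	set_blocksize_demo(&m, 2);
	printf("buggy reader: %u, fixed reader: %u\n",
	       readahead_buggy(&m), readahead_fixed(&m));
	return 0;
}

In the single-threaded demo both readers happen to return the updated value; the difference only matters under concurrency, where readahead_buggy() can carry a stale min_order into its critical section while readahead_fixed() cannot. That is exactly why the patch moves the mapping_min_folio_order() read below filemap_invalidate_lock_shared() rather than keeping it in the declaration.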