author	Markus Stockhausen <stockhausen@collogia.de>	2018-07-27 11:09:53 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-07-27 11:09:53 -0400
commit	dc30b96ab6d569060741572cf30517d3179429a8 (patch)
tree	9b84c38b14a2064c82766d3beca0511820e4606c
parent	cdcdcaae8450a975e7d07e1bfec21f9b8c016d0c (diff)
readahead: stricter check for bdi io_pages
ondemand_readahead() checks bdi->io_pages to cap the maximum number of pages to be processed. This works until the readit section: if we do an async-only readahead (async size == sync size) and the target is at the beginning of the window, we expand the window by another get_next_ra_size() pages. blktrace for large reads shows that the kernel then always issues a read of double the capped size at the beginning of processing. Add an additional check for io_pages in the lower part of the function.

The fix helps devices that hard-limit bio pages and rely on proper handling of max_hw_read_sectors (e.g. older FusionIO cards). For that reason it could qualify for stable.

Fixes: 9491ae4a ("mm: don't cap request size based on read-ahead setting")
Cc: stable@vger.kernel.org
Signed-off-by: Markus Stockhausen <stockhausen@collogia.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
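To make the doubling visible outside the kernel, here is a minimal user-space sketch of the window-expansion logic; ra_state and next_ra_size are simplified stand-ins for the kernel's struct file_ra_state and get_next_ra_size(), and the numbers are illustrative, not taken from the patch:

#include <stdio.h>

/* Simplified stand-in for the kernel's struct file_ra_state. */
struct ra_state {
	unsigned long size;       /* pages in the current window */
	unsigned long async_size; /* pages of that window read ahead asynchronously */
};

/* Rough model of get_next_ra_size(): grow the window, clamped to max. */
static unsigned long next_ra_size(const struct ra_state *ra, unsigned long max)
{
	unsigned long newsize = (ra->size < max / 16) ? 4 * ra->size
						      : 2 * ra->size;
	return newsize < max ? newsize : max;
}

int main(void)
{
	unsigned long max_pages = 32;	/* stands in for bdi->io_pages */
	struct ra_state ra = { .size = 32, .async_size = 32 }; /* async-only window */
	unsigned long add = next_ra_size(&ra, max_pages);

	/* Before the patch: the merged window is ra.size + add, up to 2 * max_pages. */
	unsigned long before = ra.size + add;

	/* After the patch: the merged window is clamped to max_pages. */
	unsigned long after = (ra.size + add <= max_pages) ? ra.size + add
							   : max_pages;

	printf("before: %lu pages, after: %lu pages (cap: %lu)\n",
	       before, after, max_pages);
	return 0;
}

With size == async_size == max_pages, the unpatched expansion submits a 64-page window against a 32-page cap, matching the doubled reads seen in the trace.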
mm/readahead.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 9f62b7151100..a59ea70527b9 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -386,6 +386,7 @@ ondemand_readahead(struct address_space *mapping,
 {
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
+	unsigned long add_pages;
 	pgoff_t prev_offset;
 
 	/*
@@ -475,10 +476,17 @@ readit:
 	 * Will this read hit the readahead marker made by itself?
 	 * If so, trigger the readahead marker hit now, and merge
 	 * the resulted next readahead window into the current one.
+	 * Take care of maximum IO pages as above.
 	 */
 	if (offset == ra->start && ra->size == ra->async_size) {
-		ra->async_size = get_next_ra_size(ra, max_pages);
-		ra->size += ra->async_size;
+		add_pages = get_next_ra_size(ra, max_pages);
+		if (ra->size + add_pages <= max_pages) {
+			ra->async_size = add_pages;
+			ra->size += add_pages;
+		} else {
+			ra->size = max_pages;
+			ra->async_size = max_pages >> 1;
+		}
 	}
 
 	return ra_submit(ra, mapping, filp);