Diffstat (limited to 'mm/readahead.c')
 mm/readahead.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index d2504877b269..4a58befbde4a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int max;	/* max readahead pages */
+	int max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
 	int sequential;
-
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}
 
+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
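
Note (not part of the commit): the two added lines move the sequentiality test past the expected-callback fast path and rebase it on prev_pos, which now stores a byte position rather than a page index, so it must be shifted down by PAGE_CACHE_SHIFT before being compared with the current request's page offset. A minimal sketch of that test as a standalone helper, using the field and macro names from the tree this diff applies to (the helper name itself is hypothetical):

/*
 * Hypothetical helper, for illustration only: the sequential-read
 * test as it stands after this patch.  ra->prev_pos is a byte
 * position, so convert it to a page index before comparing with
 * the page offset of the current request.
 */
static int read_is_sequential(struct file_ra_state *ra,
			      pgoff_t offset, unsigned long req_size)
{
	pgoff_t prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;

	/*
	 * Sequential if the read starts at or just past the last
	 * visited page, or if the request is larger than the
	 * maximum readahead window anyway.
	 */
	return offset - prev_offset <= 1UL || req_size > ra->ra_pages;
}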