author     Fengguang Wu <wfg@mail.ustc.edu.cn>                     2007-10-16 04:24:33 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 12:42:52 -0400
commit     f4e6b498d6e06742d72706ef50593a9c4dd72214 (patch)
tree       74a573302b2ea086c0d21907175be604f110f5b1 /mm/readahead.c
parent     0bb7ba6b9c358c12084a3cbc6ac08c8d1e973937 (diff)
readahead: combine file_ra_state.prev_index/prev_offset into prev_pos
Combine the file_ra_state members

	unsigned long prev_index
	unsigned int prev_offset

into

	loff_t prev_pos

It is more consistent and better supports huge files.

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
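The benefit is easiest to see in the arithmetic: a single loff_t byte position (64-bit on every architecture) carries both of the old fields at once, and either can be recovered with a shift or a mask. A minimal userspace sketch of that encoding, assuming 4 KiB pages (the kernel derives the shift from PAGE_CACHE_SHIFT; the variable names here are illustrative, not the kernel's):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12			/* assumed: 4 KiB pages */
	#define PAGE_SIZE  (1u << PAGE_SHIFT)

	int main(void)
	{
		/* Byte just past a read ending beyond 4 GiB: one signed
		 * 64-bit position holds the page index and the in-page
		 * offset together, with no truncation on huge files. */
		int64_t prev_pos = (6LL << 30) + 100;	/* 6 GiB + 100 */

		int64_t  prev_index  = prev_pos >> PAGE_SHIFT;		/* old prev_index  */
		uint32_t prev_offset = prev_pos & (PAGE_SIZE - 1);	/* old prev_offset */

		printf("pos=%lld -> page=%lld offset=%u\n",
		       (long long)prev_pos, (long long)prev_index, prev_offset);
		return 0;
	}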
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--  mm/readahead.c | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index d2504877b269..4a58befbde4a 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,11 +351,9 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int	max;	/* max readahead pages */
-	int	sequential;
-
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
+	int	max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
+	int	sequential;
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}
 
+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
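Taken together, the last two hunks move the sequential-access test after the expected-offset fast path and rebase it on prev_pos: the previous request's final byte position is converted back to a page index, and the current request counts as sequential when it starts on or just after that page, or when it is large enough to exceed the readahead window anyway. A standalone sketch of just that predicate, with PAGE_SHIFT assumed to be 12 and pgoff_t modeled as unsigned long (a model of the hunk above, not the kernel code itself):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed; the kernel uses PAGE_CACHE_SHIFT */

	/* offset is the page index of the current read, req_size its
	 * length in pages, prev_pos the byte just past the previous read,
	 * max the readahead window in pages. */
	static bool is_sequential(unsigned long offset, unsigned long req_size,
				  long long prev_pos, unsigned long max)
	{
		unsigned long prev_offset = (unsigned long)(prev_pos >> PAGE_SHIFT);

		/* Same page or the next page is sequential; unsigned
		 * subtraction makes backward jumps wrap to huge values and
		 * fail the test. Oversized requests pass regardless. */
		return offset - prev_offset <= 1UL || req_size > max;
	}

	int main(void)
	{
		/* previous read ended inside page 99 */
		long long prev_pos = (100LL << PAGE_SHIFT) - 1;

		printf("%d\n", is_sequential(100, 4, prev_pos, 32));	/* 1: next page  */
		printf("%d\n", is_sequential(500, 4, prev_pos, 32));	/* 0: random jump */
		printf("%d\n", is_sequential(500, 64, prev_pos, 32));	/* 1: large read  */
		return 0;
	}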