path: root/mm/readahead.c
author		Fengguang Wu <wfg@mail.ustc.edu.cn>	2007-07-19 04:48:08 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-19 13:04:44 -0400
commit		f9acc8c7b35a100f3a9e0e6977f7807b0169f9a5 (patch)
tree		6a4dcd227bb698a217a1d42d37e3f0135a444ea4 /mm/readahead.c
parent		cf914a7d656e62b9dd3e0dffe4f62b953ae6048d (diff)
readahead: sanify file_ra_state names
Rename some file_ra_state variables and remove some accessors.

It results in much simpler code.
Kudos to Rusty!

Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/readahead.c')
-rw-r--r--	mm/readahead.c	68
1 files changed, 25 insertions, 43 deletions
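
For orientation before the diff: a hedged sketch of the file_ra_state fields this patch renames to. Only the fields the hunks below touch are shown, with comments inferred from the patch itself; the authoritative definition lives in include/linux/fs.h.

/* Sketch only, not the full kernel struct from include/linux/fs.h. */
struct file_ra_state {
	pgoff_t start;			/* start of the most recent readahead window */
	unsigned long size;		/* number of pages in that window */
	unsigned long async_size;	/* fire async readahead when this many
					   window pages remain unconsumed */
	unsigned long ra_pages;		/* maximum readahead window; used as
					   'max' in ondemand_readahead() below */
	unsigned long prev_index;	/* position of the previous read() */
	/* ... statistics fields omitted ... */
};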
diff --git a/mm/readahead.c b/mm/readahead.c
index 3d262bb738a9..39bf45d43320 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -253,21 +253,16 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-unsigned long ra_submit(struct file_ra_state *ra,
+static unsigned long ra_submit(struct file_ra_state *ra,
 		       struct address_space *mapping, struct file *filp)
 {
-	unsigned long ra_size;
-	unsigned long la_size;
 	int actual;
 
-	ra_size = ra_readahead_size(ra);
-	la_size = ra_lookahead_size(ra);
 	actual = __do_page_cache_readahead(mapping, filp,
-					ra->ra_index, ra_size, la_size);
+					ra->start, ra->size, ra->async_size);
 
 	return actual;
 }
-EXPORT_SYMBOL_GPL(ra_submit);
 
 /*
  * Set the initial window size, round to next power of 2 and square
@@ -296,7 +291,7 @@ static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
 static unsigned long get_next_ra_size(struct file_ra_state *ra,
 						unsigned long max)
 {
-	unsigned long cur = ra->readahead_index - ra->ra_index;
+	unsigned long cur = ra->size;
 	unsigned long newsize;
 
 	if (cur < max / 16)
@@ -313,28 +308,21 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * The fields in struct file_ra_state represent the most-recently-executed
  * readahead attempt:
  *
- *                    |-------- last readahead window -------->|
- *       |-- application walking here -->|
- * ======#============|==================#=====================|
- *       ^la_index    ^ra_index          ^lookahead_index      ^readahead_index
- *
- * [ra_index, readahead_index) represents the last readahead window.
- *
- * [la_index, lookahead_index] is where the application would be walking(in
- * the common case of cache-cold sequential reads): the last window was
- * established when the application was at la_index, and the next window will
- * be bring in when the application reaches lookahead_index.
+ *                        |<----- async_size ---------|
+ *     |------------------- size -------------------->|
+ *     |==================#===========================|
+ *     ^start             ^page marked with PG_readahead
  *
  * To overlap application thinking time and disk I/O time, we do
  * `readahead pipelining': Do not wait until the application consumed all
  * readahead pages and stalled on the missing page at readahead_index;
- * Instead, submit an asynchronous readahead I/O as early as the application
- * reads on the page at lookahead_index. Normally lookahead_index will be
- * equal to ra_index, for maximum pipelining.
+ * Instead, submit an asynchronous readahead I/O as soon as there are
+ * only async_size pages left in the readahead window. Normally async_size
+ * will be equal to size, for maximum pipelining.
  *
  * In interleaved sequential reads, concurrent streams on the same fd can
  * be invalidating each other's readahead state. So we flag the new readahead
- * page at lookahead_index with PG_readahead, and use it as readahead
+ * page at (start+size-async_size) with PG_readahead, and use it as readahead
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
@@ -363,24 +351,21 @@ ondemand_readahead(struct address_space *mapping,
 		   unsigned long req_size)
 {
 	unsigned long max;	/* max readahead pages */
-	pgoff_t ra_index;	/* readahead index */
-	unsigned long ra_size;	/* readahead size */
-	unsigned long la_size;	/* lookahead size */
 	int sequential;
 
 	max = ra->ra_pages;
 	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
 
 	/*
-	 * Lookahead/readahead hit, assume sequential access.
+	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
 	 */
-	if (offset && (offset == ra->lookahead_index ||
-			offset == ra->readahead_index)) {
-		ra_index = ra->readahead_index;
-		ra_size = get_next_ra_size(ra, max);
-		la_size = ra_size;
-		goto fill_ra;
+	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
+			offset == (ra->start + ra->size))) {
+		ra->start += ra->size;
+		ra->size = get_next_ra_size(ra, max);
+		ra->async_size = ra->size;
+		goto readit;
 	}
 
 	/*
@@ -399,24 +384,21 @@ ondemand_readahead(struct address_space *mapping,
 	 * - oversize random read
 	 * Start readahead for it.
 	 */
-	ra_index = offset;
-	ra_size = get_init_ra_size(req_size, max);
-	la_size = ra_size > req_size ? ra_size - req_size : ra_size;
+	ra->start = offset;
+	ra->size = get_init_ra_size(req_size, max);
+	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
 
 	/*
-	 * Hit on a lookahead page without valid readahead state.
+	 * Hit on a marked page without valid readahead state.
 	 * E.g. interleaved reads.
 	 * Not knowing its readahead pos/size, bet on the minimal possible one.
 	 */
 	if (hit_readahead_marker) {
-		ra_index++;
-		ra_size = min(4 * ra_size, max);
+		ra->start++;
+		ra->size = get_next_ra_size(ra, max);
 	}
 
-fill_ra:
-	ra_set_index(ra, offset, ra_index);
-	ra_set_size(ra, ra_size, la_size);
-
+readit:
 	return ra_submit(ra, mapping, filp);
 }
 
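To make the renamed-field arithmetic concrete, below is a minimal standalone sketch (plain userspace C, not kernel code; the values are invented) of the two offsets the patched ondemand_readahead() compares against: the PG_readahead marker at start + size - async_size, and the first page past the window at start + size.

#include <stdio.h>

/* Mirrors the three renamed fields; not the kernel struct. */
struct ra_window {
	unsigned long start;		/* first page of the last readahead window */
	unsigned long size;		/* pages in that window */
	unsigned long async_size;	/* pages left when async readahead fires */
};

int main(void)
{
	struct ra_window ra = { .start = 100, .size = 32, .async_size = 32 };

	/* Page flagged PG_readahead, per the rewritten comment block. */
	unsigned long marker = ra.start + ra.size - ra.async_size;

	/* First page past the window; a read here also extends the window. */
	unsigned long end = ra.start + ra.size;

	/* With async_size == size the marker is the window's first page,
	 * giving maximum pipelining: prints "marker=100 end=132". */
	printf("marker=%lu end=%lu\n", marker, end);
	return 0;
}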