diff options
author | Fengguang Wu <wfg@mail.ustc.edu.cn> | 2007-07-19 04:47:57 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-19 13:04:43 -0400 |
commit | 46fc3e7b4e7233a0ac981ac9084b55217318d04d (patch) | |
tree | 555bededb43671605fa085d3b4d330d31aa4af5c /mm/readahead.c | |
parent | d77c2d7cc5126639a47d73300b40d461f2811a0f (diff) |
readahead: add look-ahead support to __do_page_cache_readahead()
Add look-ahead support to __do_page_cache_readahead().
It works by
- marking the Nth page from the end with PG_readahead,
(which instructs the page's first reader to invoke readahead)
- and only doing the marking for newly allocated pages.
(to prevent blindly doing readahead on already cached pages)
Look-ahead is a technique to achieve I/O pipelining:
While the application is working through a chunk of cached pages, the kernel
reads ahead the next chunk of pages _before_ they are needed. It effectively
hides low-level I/O latencies from high-level applications.
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Steven Pratt <slpratt@austin.ibm.com>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/readahead.c')
-rw-r--r-- | mm/readahead.c | 15 |
1 files changed, 9 insertions, 6 deletions
diff --git a/mm/readahead.c b/mm/readahead.c index 9861e883fe57..88ea0f29aac8 100644 --- a/mm/readahead.c +++ b/mm/readahead.c | |||
@@ -265,7 +265,8 @@ out: | |||
265 | */ | 265 | */ |
266 | static int | 266 | static int |
267 | __do_page_cache_readahead(struct address_space *mapping, struct file *filp, | 267 | __do_page_cache_readahead(struct address_space *mapping, struct file *filp, |
268 | pgoff_t offset, unsigned long nr_to_read) | 268 | pgoff_t offset, unsigned long nr_to_read, |
269 | unsigned long lookahead_size) | ||
269 | { | 270 | { |
270 | struct inode *inode = mapping->host; | 271 | struct inode *inode = mapping->host; |
271 | struct page *page; | 272 | struct page *page; |
@@ -278,7 +279,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
278 | if (isize == 0) | 279 | if (isize == 0) |
279 | goto out; | 280 | goto out; |
280 | 281 | ||
281 | end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); | 282 | end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); |
282 | 283 | ||
283 | /* | 284 | /* |
284 | * Preallocate as many pages as we will need. | 285 | * Preallocate as many pages as we will need. |
@@ -301,6 +302,8 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
301 | break; | 302 | break; |
302 | page->index = page_offset; | 303 | page->index = page_offset; |
303 | list_add(&page->lru, &page_pool); | 304 | list_add(&page->lru, &page_pool); |
305 | if (page_idx == nr_to_read - lookahead_size) | ||
306 | SetPageReadahead(page); | ||
304 | ret++; | 307 | ret++; |
305 | } | 308 | } |
306 | read_unlock_irq(&mapping->tree_lock); | 309 | read_unlock_irq(&mapping->tree_lock); |
@@ -337,7 +340,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
337 | if (this_chunk > nr_to_read) | 340 | if (this_chunk > nr_to_read) |
338 | this_chunk = nr_to_read; | 341 | this_chunk = nr_to_read; |
339 | err = __do_page_cache_readahead(mapping, filp, | 342 | err = __do_page_cache_readahead(mapping, filp, |
340 | offset, this_chunk); | 343 | offset, this_chunk, 0); |
341 | if (err < 0) { | 344 | if (err < 0) { |
342 | ret = err; | 345 | ret = err; |
343 | break; | 346 | break; |
@@ -384,7 +387,7 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
384 | if (bdi_read_congested(mapping->backing_dev_info)) | 387 | if (bdi_read_congested(mapping->backing_dev_info)) |
385 | return -1; | 388 | return -1; |
386 | 389 | ||
387 | return __do_page_cache_readahead(mapping, filp, offset, nr_to_read); | 390 | return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0); |
388 | } | 391 | } |
389 | 392 | ||
390 | /* | 393 | /* |
@@ -404,7 +407,7 @@ blockable_page_cache_readahead(struct address_space *mapping, struct file *filp, | |||
404 | if (!block && bdi_read_congested(mapping->backing_dev_info)) | 407 | if (!block && bdi_read_congested(mapping->backing_dev_info)) |
405 | return 0; | 408 | return 0; |
406 | 409 | ||
407 | actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read); | 410 | actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0); |
408 | 411 | ||
409 | return check_ra_success(ra, nr_to_read, actual); | 412 | return check_ra_success(ra, nr_to_read, actual); |
410 | } | 413 | } |
@@ -449,7 +452,7 @@ static int make_ahead_window(struct address_space *mapping, struct file *filp, | |||
449 | * @req_size: hint: total size of the read which the caller is performing in | 452 | * @req_size: hint: total size of the read which the caller is performing in |
450 | * PAGE_CACHE_SIZE units | 453 | * PAGE_CACHE_SIZE units |
451 | * | 454 | * |
452 | * page_cache_readahead() is the main function. If performs the adaptive | 455 | * page_cache_readahead() is the main function. It performs the adaptive |
453 | * readahead window size management and submits the readahead I/O. | 456 | * readahead window size management and submits the readahead I/O. |
454 | * | 457 | * |
455 | * Note that @filp is purely used for passing on to the ->readpage[s]() | 458 | * Note that @filp is purely used for passing on to the ->readpage[s]() |