author		Wu Fengguang <fengguang.wu@intel.com>	2009-06-16 18:31:30 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:29 -0400
commit		d30a11004e3411909f2448546f036a011978062e (patch)
tree		c1980adb410d9fabd2c2eb8af9f0ed8ee4b656da
parent		2fad6f5deee5556f511eab58da78737a23ddb35d (diff)
readahead: record mmap read-around states in file_ra_state
Mmap read-around now shares the same code style and data structure with the
readahead code.

This also removes do_page_cache_readahead(). Its last user, mmap read-around,
has been changed to call ra_submit().

The no-readahead-if-congested logic is dropped along the way: users are quite
sensitive to slow loading of executables, so it is undesirable to disable mmap
read-around on a congested queue.

[akpm@linux-foundation.org: coding-style fixes]
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/mm.h	 5
-rw-r--r--	mm/filemap.c		12
-rw-r--r--	mm/readahead.c		23
3 files changed, 12 insertions(+), 28 deletions(-)
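To illustrate the new read-around bookkeeping (see the mm/filemap.c hunk below), here is a minimal user-space C sketch of how the window is now expressed through the three file_ra_state fields the patch fills in. The struct and helper names here are illustrative only, not the kernel's definitions.

#include <stdio.h>

/*
 * Simplified stand-in for the three file_ra_state fields used by
 * mmap read-around after this patch (illustrative, not the kernel struct).
 */
struct ra_window {
	long start;		  /* first page to read */
	unsigned long size;	  /* number of pages to read */
	unsigned long async_size; /* async-readahead trigger margin */
};

/* Mirrors the new read-around setup in do_sync_mmap_readahead(). */
static struct ra_window mmap_readaround(long offset, unsigned long ra_pages)
{
	struct ra_window ra;

	/* Centre the window on the faulting page, clamped at page 0,
	 * as in: ra->start = max_t(long, 0, offset - ra_pages/2). */
	ra.start = offset - (long)(ra_pages / 2);
	if (ra.start < 0)
		ra.start = 0;
	ra.size = ra_pages;	/* read the whole window at once ... */
	ra.async_size = 0;	/* ... with no async readahead trigger */
	return ra;
}

int main(void)
{
	/* Fault at page 100 with a 32-page read-around window. */
	struct ra_window ra = mmap_readaround(100, 32);

	/* Prints: start=84 size=32 async_size=0 */
	printf("start=%ld size=%lu async_size=%lu\n",
	       ra.start, ra.size, ra.async_size);
	return 0;
}

In the kernel, the window recorded in file_ra_state is then handed to ra_submit(), which passes ra->start, ra->size and ra->async_size on to __do_page_cache_readahead().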
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ad613ed66ab0..33da7f538841 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1178,8 +1178,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
 
@@ -1197,6 +1195,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 			 unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+			struct address_space *mapping,
+			struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
diff --git a/mm/filemap.c b/mm/filemap.c
index 5c0c6518f341..734891d0663d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1488,13 +1488,15 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	if (ra->mmap_miss > MMAP_LOTSAMISS)
 		return;
 
+	/*
+	 * mmap read-around
+	 */
 	ra_pages = max_sane_readahead(ra->ra_pages);
 	if (ra_pages) {
-		pgoff_t start = 0;
-
-		if (offset > ra_pages / 2)
-			start = offset - ra_pages / 2;
-		do_page_cache_readahead(mapping, file, start, ra_pages);
+		ra->start = max_t(long, 0, offset - ra_pages/2);
+		ra->size = ra_pages;
+		ra->async_size = 0;
+		ra_submit(ra, mapping, file);
 	}
 }
 
diff --git a/mm/readahead.c b/mm/readahead.c
index d7c6e143a129..a7f01fcce9e7 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -133,15 +133,12 @@ out:
 }
 
 /*
- * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
  * the pages first, then submits them all for I/O.  This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
- *
- * do_page_cache_readahead() returns -1 if it encountered request queue
- * congestion.
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -232,22 +229,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 }
 
 /*
- * This version skips the IO if the queue is read-congested, and will tell the
- * block layer to abandon the readahead if request allocation would block.
- *
- * force_page_cache_readahead() will ignore queue congestion and will block on
- * request queues.
- */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read)
-{
-	if (bdi_read_congested(mapping->backing_dev_info))
-		return -1;
-
-	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
-}
-
-/*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
  */
@@ -260,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static unsigned long ra_submit(struct file_ra_state *ra,
+unsigned long ra_submit(struct file_ra_state *ra,
 		       struct address_space *mapping, struct file *filp)
 {
 	int actual;