path: root/fs
author	Nick Piggin <npiggin@suse.de>	2007-10-16 04:24:57 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:54 -0400
commit	eb2be189317d031895b5ca534fbf735eb546158b (patch)
tree	8f1eda7af3be7285244a6f1ad77682e90d403c7b /fs
parent	64649a58919e66ec21792dbb6c48cb3da22cbd7f (diff)
mm: buffered write cleanup
Quite a bit of code is used in maintaining these "cached pages" that are probably pretty unlikely to get used.  It would require a narrow race where the page is inserted concurrently while this process is allocating a page in order to create the spare page.  Then a multi-page write into an uncached part of the file, to make use of it.

Next, the buffered write path (and others) uses its own LRU pagevec when it should be just using the per-CPU LRU pagevec (which will cut down on both data and code size cacheline footprint).  Also, these private LRU pagevecs are emptied after just a very short time, in contrast with the per-CPU pagevecs that are persistent.  Net result: 7.3 times fewer lru_lock acquisitions required to add the pages to pagecache for a bulk write (in 4K chunks).

[this gets rid of some cond_resched() calls in readahead.c and mpage.c due to clashes in -mm.  What put them there, and why? ]

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
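For reference, here is a rough sketch (not part of this patch, and simplified from the pagevec APIs of this era) of the two patterns the second paragraph contrasts:

	/* Old pattern: each caller keeps a private on-stack pagevec and
	 * takes lru_lock itself whenever the batch fills up. */
	struct pagevec lru_pvec;

	pagevec_init(&lru_pvec, 0);
	...
	if (!add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
		/* page is now in pagecache; queue it for the LRU */
		if (!pagevec_add(&lru_pvec, page))	/* batch full */
			__pagevec_lru_add(&lru_pvec);	/* one lru_lock round trip */
	}
	...
	pagevec_lru_add(&lru_pvec);	/* drain the short-lived private batch */

	/* New pattern: add_to_page_cache_lru() feeds the persistent per-CPU
	 * pagevec instead; it is roughly equivalent to: */
	int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				  pgoff_t offset, gfp_t gfp_mask)
	{
		int ret = add_to_page_cache(page, mapping, offset, gfp_mask);

		if (ret == 0)
			lru_cache_add(page);	/* per-CPU batch, drained lazily */
		return ret;
	}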
Diffstat (limited to 'fs')
-rw-r--r--	fs/mpage.c	10
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/fs/mpage.c b/fs/mpage.c
index b1c3e5890508..d54f8f897224 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -379,31 +379,25 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	struct bio *bio = NULL;
 	unsigned page_idx;
 	sector_t last_block_in_bio = 0;
-	struct pagevec lru_pvec;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
 
 	clear_buffer_mapped(&map_bh);
-	pagevec_init(&lru_pvec, 0);
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_entry(pages->prev, struct page, lru);
 
 		prefetchw(&page->flags);
 		list_del(&page->lru);
-		if (!add_to_page_cache(page, mapping,
+		if (!add_to_page_cache_lru(page, mapping,
 					page->index, GFP_KERNEL)) {
 			bio = do_mpage_readpage(bio, page,
 					nr_pages - page_idx,
 					&last_block_in_bio, &map_bh,
 					&first_logical_block,
 					get_block);
-			if (!pagevec_add(&lru_pvec, page))
-				__pagevec_lru_add(&lru_pvec);
-		} else {
-			page_cache_release(page);
 		}
+		page_cache_release(page);
 	}
-	pagevec_lru_add(&lru_pvec);
 	BUG_ON(!list_empty(pages));
 	if (bio)
 		mpage_bio_submit(READ, bio);
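Worth noting about the rewritten loop: add_to_page_cache_lru() takes its own page references (one for the pagecache, one for the LRU), so the now-unconditional page_cache_release() drops only the caller's allocation reference; when the insert fails, the page was never added anywhere and the release frees it, which is what the old else branch did explicitly.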