about summary refs log tree commit diff stats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2010-03-17 09:31:04 -0400
committerChris Mason <chris.mason@oracle.com>2010-04-05 14:41:51 -0400
commit28ecb60906e86e74e9ad4ac7e0218d8631e73a94 (patch)
tree7214fa7bfb39f789aa4a78e174e39f61ef101b95 /fs/btrfs
parent0cad8a1130f77c7c445e3298c0e3593b3c0ef439 (diff)
Btrfs: use add_to_page_cache_lru, use __page_cache_alloc
Pagecache pages should be allocated with __page_cache_alloc, so they obey pagecache memory policies. add_to_page_cache_lru is exported, so it should be used. Benefits over using a private pagevec: neater code, 128 bytes fewer stack used, percpu lru ordering is preserved, and finally don't need to flush pagevec before returning so batching may be shared with other LRU insertions. Signed-off-by: Nick Piggin <npiggin@suse.de>: Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/compression.c22
-rw-r--r--fs/btrfs/extent_io.c15
2 files changed, 5 insertions(+), 32 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 28b92a7218ab..1d54c5308df5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -31,7 +31,6 @@
31#include <linux/swap.h> 31#include <linux/swap.h>
32#include <linux/writeback.h> 32#include <linux/writeback.h>
33#include <linux/bit_spinlock.h> 33#include <linux/bit_spinlock.h>
34#include <linux/pagevec.h>
35#include "compat.h" 34#include "compat.h"
36#include "ctree.h" 35#include "ctree.h"
37#include "disk-io.h" 36#include "disk-io.h"
@@ -445,7 +444,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
445 unsigned long nr_pages = 0; 444 unsigned long nr_pages = 0;
446 struct extent_map *em; 445 struct extent_map *em;
447 struct address_space *mapping = inode->i_mapping; 446 struct address_space *mapping = inode->i_mapping;
448 struct pagevec pvec;
449 struct extent_map_tree *em_tree; 447 struct extent_map_tree *em_tree;
450 struct extent_io_tree *tree; 448 struct extent_io_tree *tree;
451 u64 end; 449 u64 end;
@@ -461,7 +459,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
461 459
462 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 460 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
463 461
464 pagevec_init(&pvec, 0);
465 while (last_offset < compressed_end) { 462 while (last_offset < compressed_end) {
466 page_index = last_offset >> PAGE_CACHE_SHIFT; 463 page_index = last_offset >> PAGE_CACHE_SHIFT;
467 464
@@ -478,26 +475,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
478 goto next; 475 goto next;
479 } 476 }
480 477
481 page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS); 478 page = __page_cache_alloc(mapping_gfp_mask(mapping) &
479 ~__GFP_FS);
482 if (!page) 480 if (!page)
483 break; 481 break;
484 482
485 page->index = page_index; 483 if (add_to_page_cache_lru(page, mapping, page_index,
486 /* 484 GFP_NOFS)) {
487 * what we want to do here is call add_to_page_cache_lru,
488 * but that isn't exported, so we reproduce it here
489 */
490 if (add_to_page_cache(page, mapping,
491 page->index, GFP_NOFS)) {
492 page_cache_release(page); 485 page_cache_release(page);
493 goto next; 486 goto next;
494 } 487 }
495 488
496 /* open coding of lru_cache_add, also not exported */
497 page_cache_get(page);
498 if (!pagevec_add(&pvec, page))
499 __pagevec_lru_add_file(&pvec);
500
501 end = last_offset + PAGE_CACHE_SIZE - 1; 489 end = last_offset + PAGE_CACHE_SIZE - 1;
502 /* 490 /*
503 * at this point, we have a locked page in the page cache 491 * at this point, we have a locked page in the page cache
@@ -551,8 +539,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
551next: 539next:
552 last_offset += PAGE_CACHE_SIZE; 540 last_offset += PAGE_CACHE_SIZE;
553 } 541 }
554 if (pagevec_count(&pvec))
555 __pagevec_lru_add_file(&pvec);
556 return 0; 542 return 0;
557} 543}
558 544
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c99121ac5d6b..fc742e59815e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2679,33 +2679,20 @@ int extent_readpages(struct extent_io_tree *tree,
2679{ 2679{
2680 struct bio *bio = NULL; 2680 struct bio *bio = NULL;
2681 unsigned page_idx; 2681 unsigned page_idx;
2682 struct pagevec pvec;
2683 unsigned long bio_flags = 0; 2682 unsigned long bio_flags = 0;
2684 2683
2685 pagevec_init(&pvec, 0);
2686 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 2684 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2687 struct page *page = list_entry(pages->prev, struct page, lru); 2685 struct page *page = list_entry(pages->prev, struct page, lru);
2688 2686
2689 prefetchw(&page->flags); 2687 prefetchw(&page->flags);
2690 list_del(&page->lru); 2688 list_del(&page->lru);
2691 /* 2689 if (!add_to_page_cache_lru(page, mapping,
2692 * what we want to do here is call add_to_page_cache_lru,
2693 * but that isn't exported, so we reproduce it here
2694 */
2695 if (!add_to_page_cache(page, mapping,
2696 page->index, GFP_KERNEL)) { 2690 page->index, GFP_KERNEL)) {
2697
2698 /* open coding of lru_cache_add, also not exported */
2699 page_cache_get(page);
2700 if (!pagevec_add(&pvec, page))
2701 __pagevec_lru_add_file(&pvec);
2702 __extent_read_full_page(tree, page, get_extent, 2691 __extent_read_full_page(tree, page, get_extent,
2703 &bio, 0, &bio_flags); 2692 &bio, 0, &bio_flags);
2704 } 2693 }
2705 page_cache_release(page); 2694 page_cache_release(page);
2706 } 2695 }
2707 if (pagevec_count(&pvec))
2708 __pagevec_lru_add_file(&pvec);
2709 BUG_ON(!list_empty(pages)); 2696 BUG_ON(!list_empty(pages));
2710 if (bio) 2697 if (bio)
2711 submit_one_bio(READ, bio, 0, bio_flags); 2698 submit_one_bio(READ, bio, 0, bio_flags);