about summary refs log tree commit diff stats
path: root/fs/btrfs/compression.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2010-03-17 09:31:04 -0400
committerChris Mason <chris.mason@oracle.com>2010-04-05 14:41:51 -0400
commit28ecb60906e86e74e9ad4ac7e0218d8631e73a94 (patch)
tree7214fa7bfb39f789aa4a78e174e39f61ef101b95 /fs/btrfs/compression.c
parent0cad8a1130f77c7c445e3298c0e3593b3c0ef439 (diff)
Btrfs: use add_to_page_cache_lru, use __page_cache_alloc
Pagecache pages should be allocated with __page_cache_alloc, so they obey pagecache memory policies. add_to_page_cache_lru is exported, so it should be used. Benefits over using a private pagevec: neater code, 128 bytes fewer stack used, percpu lru ordering is preserved, and finally don't need to flush pagevec before returning so batching may be shared with other LRU insertions. Signed-off-by: Nick Piggin <npiggin@suse.de>: Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--fs/btrfs/compression.c22
1 file changed, 4 insertions, 18 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 28b92a7218ab..1d54c5308df5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -31,7 +31,6 @@
31#include <linux/swap.h> 31#include <linux/swap.h>
32#include <linux/writeback.h> 32#include <linux/writeback.h>
33#include <linux/bit_spinlock.h> 33#include <linux/bit_spinlock.h>
34#include <linux/pagevec.h>
35#include "compat.h" 34#include "compat.h"
36#include "ctree.h" 35#include "ctree.h"
37#include "disk-io.h" 36#include "disk-io.h"
@@ -445,7 +444,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
445 unsigned long nr_pages = 0; 444 unsigned long nr_pages = 0;
446 struct extent_map *em; 445 struct extent_map *em;
447 struct address_space *mapping = inode->i_mapping; 446 struct address_space *mapping = inode->i_mapping;
448 struct pagevec pvec;
449 struct extent_map_tree *em_tree; 447 struct extent_map_tree *em_tree;
450 struct extent_io_tree *tree; 448 struct extent_io_tree *tree;
451 u64 end; 449 u64 end;
@@ -461,7 +459,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
461 459
462 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 460 end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
463 461
464 pagevec_init(&pvec, 0);
465 while (last_offset < compressed_end) { 462 while (last_offset < compressed_end) {
466 page_index = last_offset >> PAGE_CACHE_SHIFT; 463 page_index = last_offset >> PAGE_CACHE_SHIFT;
467 464
@@ -478,26 +475,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
478 goto next; 475 goto next;
479 } 476 }
480 477
481 page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS); 478 page = __page_cache_alloc(mapping_gfp_mask(mapping) &
479 ~__GFP_FS);
482 if (!page) 480 if (!page)
483 break; 481 break;
484 482
485 page->index = page_index; 483 if (add_to_page_cache_lru(page, mapping, page_index,
486 /* 484 GFP_NOFS)) {
487 * what we want to do here is call add_to_page_cache_lru,
488 * but that isn't exported, so we reproduce it here
489 */
490 if (add_to_page_cache(page, mapping,
491 page->index, GFP_NOFS)) {
492 page_cache_release(page); 485 page_cache_release(page);
493 goto next; 486 goto next;
494 } 487 }
495 488
496 /* open coding of lru_cache_add, also not exported */
497 page_cache_get(page);
498 if (!pagevec_add(&pvec, page))
499 __pagevec_lru_add_file(&pvec);
500
501 end = last_offset + PAGE_CACHE_SIZE - 1; 489 end = last_offset + PAGE_CACHE_SIZE - 1;
502 /* 490 /*
503 * at this point, we have a locked page in the page cache 491 * at this point, we have a locked page in the page cache
@@ -551,8 +539,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
551next: 539next:
552 last_offset += PAGE_CACHE_SIZE; 540 last_offset += PAGE_CACHE_SIZE;
553 } 541 }
554 if (pagevec_count(&pvec))
555 __pagevec_lru_add_file(&pvec);
556 return 0; 542 return 0;
557} 543}
558 544