author	Linus Torvalds <torvalds@linux-foundation.org>	2010-04-05 16:21:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-04-05 16:21:15 -0400
commit	795d580baec0d5386b83a8b557df47c20810e86b (patch)
tree	d0387c37562e9e27a4f43cf7ae425319cbdad359	/fs/btrfs/compression.c
parent	449cedf099b23a250e7d61982e35555ccb871182 (diff)
parent	109f6aef5fc436f355ad027f4d97bd696df2049a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: add check for changed leaves in setup_leaf_for_split
  Btrfs: create snapshot references in same commit as snapshot
  Btrfs: fix small race with delalloc flushing waitqueue's
  Btrfs: use add_to_page_cache_lru, use __page_cache_alloc
  Btrfs: fix chunk allocate size calculation
  Btrfs: kill max_extent mount option
  Btrfs: fail to mount if we have problems reading the block groups
  Btrfs: check btrfs_get_extent return for IS_ERR()
  Btrfs: handle kmalloc() failure in inode lookup ioctl
  Btrfs: dereferencing freed memory
  Btrfs: Simplify num_stripes's calculation logical for __btrfs_alloc_chunk()
  Btrfs: Add error handle for btrfs_search_slot() in btrfs_read_chunk_tree()
  Btrfs: Remove unnecessary finish_wait() in wait_current_trans()
  Btrfs: add NULL check for do_walk_down()
  Btrfs: remove duplicate include in ioctl.c

Fix trivial conflict in fs/btrfs/compression.c due to slab.h include cleanups.
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--	fs/btrfs/compression.c	22
1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 629880ec8ac4..396039b3a8a2 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -31,7 +31,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
-#include <linux/pagevec.h>
 #include <linux/slab.h>
 #include "compat.h"
 #include "ctree.h"
@@ -446,7 +445,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	unsigned long nr_pages = 0;
 	struct extent_map *em;
 	struct address_space *mapping = inode->i_mapping;
-	struct pagevec pvec;
 	struct extent_map_tree *em_tree;
 	struct extent_io_tree *tree;
 	u64 end;
@@ -462,7 +460,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 
 	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
-	pagevec_init(&pvec, 0);
 	while (last_offset < compressed_end) {
 		page_index = last_offset >> PAGE_CACHE_SHIFT;
 
@@ -479,26 +476,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			goto next;
 		}
 
-		page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
+		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
+								~__GFP_FS);
 		if (!page)
 			break;
 
-		page->index = page_index;
-		/*
-		 * what we want to do here is call add_to_page_cache_lru,
-		 * but that isn't exported, so we reproduce it here
-		 */
-		if (add_to_page_cache(page, mapping,
-				      page->index, GFP_NOFS)) {
+		if (add_to_page_cache_lru(page, mapping, page_index,
+					  GFP_NOFS)) {
 			page_cache_release(page);
 			goto next;
 		}
 
-		/* open coding of lru_cache_add, also not exported */
-		page_cache_get(page);
-		if (!pagevec_add(&pvec, page))
-			__pagevec_lru_add_file(&pvec);
-
 		end = last_offset + PAGE_CACHE_SIZE - 1;
 		/*
 		 * at this point, we have a locked page in the page cache
@@ -552,8 +540,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 next:
 		last_offset += PAGE_CACHE_SIZE;
 	}
-	if (pagevec_count(&pvec))
-		__pagevec_lru_add_file(&pvec);
 	return 0;
 }
 
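For readers less familiar with the 2.6.34-era page cache API, a minimal sketch (not part of the patch) of the insertion pattern the hunks above converge on: __page_cache_alloc() allocates the page with the mapping's gfp mask minus __GFP_FS, and add_to_page_cache_lru() both hashes the page into the mapping and puts it on the LRU, which is exactly what the removed add_to_page_cache()/pagevec code open-coded. The helper name ra_insert_page() and the reduced error handling are illustrative only, not the full add_ra_bio_pages() logic.

#include <linux/pagemap.h>	/* __page_cache_alloc, add_to_page_cache_lru */
#include <linux/gfp.h>

/*
 * Hypothetical helper, not from btrfs: allocate one readahead page and
 * insert it into both the page cache and the file LRU in a single call.
 * Returns a locked page on success, NULL if allocation fails or another
 * task instantiated the same index first.
 */
static struct page *ra_insert_page(struct address_space *mapping,
				   pgoff_t index)
{
	struct page *page;

	/* avoid recursing into the filesystem while allocating */
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (!page)
		return NULL;

	/* one call replaces add_to_page_cache() + the pagevec LRU batching */
	if (add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);	/* lost the race; drop our ref */
		return NULL;
	}

	/* page is locked and on the LRU; the caller fills it and unlocks */
	return page;
}

Compared with the removed code, this also drops the extra page_cache_get(): the pagevec path held its own reference because __pagevec_lru_add_file() releases one per page when it drains, whereas add_to_page_cache_lru() manages that LRU reference internally.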