 fs/btrfs/compression.c | 22 ++++------------------
 fs/btrfs/extent_io.c   | 15 +--------------
 2 files changed, 5 insertions(+), 32 deletions(-)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 28b92a7218ab..1d54c5308df5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -31,7 +31,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
-#include <linux/pagevec.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -445,7 +444,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	unsigned long nr_pages = 0;
 	struct extent_map *em;
 	struct address_space *mapping = inode->i_mapping;
-	struct pagevec pvec;
 	struct extent_map_tree *em_tree;
 	struct extent_io_tree *tree;
 	u64 end;
@@ -461,7 +459,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 
 	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
-	pagevec_init(&pvec, 0);
 	while (last_offset < compressed_end) {
 		page_index = last_offset >> PAGE_CACHE_SHIFT;
 
@@ -478,26 +475,17 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			goto next;
 		}
 
-		page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
+		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
+								~__GFP_FS);
 		if (!page)
 			break;
 
-		page->index = page_index;
-		/*
-		 * what we want to do here is call add_to_page_cache_lru,
-		 * but that isn't exported, so we reproduce it here
-		 */
-		if (add_to_page_cache(page, mapping,
-				      page->index, GFP_NOFS)) {
+		if (add_to_page_cache_lru(page, mapping, page_index,
+								GFP_NOFS)) {
 			page_cache_release(page);
 			goto next;
 		}
 
-		/* open coding of lru_cache_add, also not exported */
-		page_cache_get(page);
-		if (!pagevec_add(&pvec, page))
-			__pagevec_lru_add_file(&pvec);
-
 		end = last_offset + PAGE_CACHE_SIZE - 1;
 		/*
 		 * at this point, we have a locked page in the page cache
@@ -551,8 +539,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 next:
 		last_offset += PAGE_CACHE_SIZE;
 	}
-	if (pagevec_count(&pvec))
-		__pagevec_lru_add_file(&pvec);
 	return 0;
 }
 
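The add_ra_bio_pages() hunks above replace the open-coded pattern (add_to_page_cache() followed by a hand-rolled LRU add through a private pagevec) with add_to_page_cache_lru(), and switch the allocation to __page_cache_alloc(). A minimal sketch of that pattern in isolation; the helper name readahead_one_page is hypothetical and the actual read I/O is reduced to a comment:

#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper, for illustration only: allocate one page and add it
 * to the page cache and the file LRU, the way add_ra_bio_pages() now does.
 */
static int readahead_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	int err;

	/* Drop __GFP_FS so the allocation cannot recurse into the fs. */
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (!page)
		return -ENOMEM;

	/*
	 * One call inserts the page into the mapping and puts it on the
	 * LRU; it fails if another task raced the page into the cache.
	 * On success the page is returned locked.
	 */
	err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
	if (err) {
		page_cache_release(page);
		return err;
	}

	/*
	 * ... issue the read for this page here; in real readahead the
	 * I/O completion path is what unlocks the page ...
	 */
	unlock_page(page);
	return 0;
}
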
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c99121ac5d6b..fc742e59815e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2679,33 +2679,20 @@ int extent_readpages(struct extent_io_tree *tree,
 {
 	struct bio *bio = NULL;
 	unsigned page_idx;
-	struct pagevec pvec;
 	unsigned long bio_flags = 0;
 
-	pagevec_init(&pvec, 0);
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_entry(pages->prev, struct page, lru);
 
 		prefetchw(&page->flags);
 		list_del(&page->lru);
-		/*
-		 * what we want to do here is call add_to_page_cache_lru,
-		 * but that isn't exported, so we reproduce it here
-		 */
-		if (!add_to_page_cache(page, mapping,
+		if (!add_to_page_cache_lru(page, mapping,
 					page->index, GFP_KERNEL)) {
-
-			/* open coding of lru_cache_add, also not exported */
-			page_cache_get(page);
-			if (!pagevec_add(&pvec, page))
-				__pagevec_lru_add_file(&pvec);
 			__extent_read_full_page(tree, page, get_extent,
 						&bio, 0, &bio_flags);
 		}
 		page_cache_release(page);
 	}
-	if (pagevec_count(&pvec))
-		__pagevec_lru_add_file(&pvec);
 	BUG_ON(!list_empty(pages));
 	if (bio)
 		submit_one_bio(READ, bio, 0, bio_flags);
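For reference, add_to_page_cache_lru() bundles exactly the two steps both hunks used to open-code. A simplified sketch of its behaviour, assuming the mm/filemap.c shape of this era (not the verbatim implementation):

#include <linux/pagemap.h>
#include <linux/swap.h>

/*
 * Rough sketch of what add_to_page_cache_lru() does; simplified, not the
 * verbatim mm implementation.  lru_cache_add_file() batches pages through
 * a per-CPU pagevec and drains it with __pagevec_lru_add_file(), i.e. the
 * same machinery the deleted btrfs code drove by hand.
 */
static int add_to_page_cache_lru_sketch(struct page *page,
					struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);

	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}

Because the LRU batching now lives in common code, the btrfs-local pagevec, its pagevec_init(), and the final __pagevec_lru_add_file() drain can all go, which is where most of the 32 deleted lines come from.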
