Diffstat (limited to 'fs')
-rw-r--r--   fs/btrfs/disk-io.c   |  26
-rw-r--r--   fs/btrfs/extent_io.c | 309
-rw-r--r--   fs/btrfs/extent_io.h |  11
-rw-r--r--   fs/btrfs/inode.c     |   3
4 files changed, 129 insertions, 220 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 99bb385c2982..86e84a8579e3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -381,7 +381,6 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 
         end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
         end = eb->start + end - 1;
-        release_extent_buffer_tail_pages(eb);
 err:
         free_extent_buffer(eb);
 out:
@@ -563,21 +562,21 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
         struct extent_map_tree *map;
         int ret;
 
-        if (page_count(page) > 3) {
-                /* once for page->private, once for the caller, once
-                 * once for the page cache
-                 */
-                return 0;
-        }
         tree = &BTRFS_I(page->mapping->host)->io_tree;
         map = &BTRFS_I(page->mapping->host)->extent_tree;
+
         ret = try_release_extent_state(map, tree, page, gfp_flags);
+        if (!ret) {
+                return 0;
+        }
+
+        ret = try_release_extent_buffer(tree, page);
         if (ret == 1) {
-                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                 ClearPagePrivate(page);
                 set_page_private(page, 0);
                 page_cache_release(page);
         }
+
         return ret;
 }
 
@@ -588,7 +587,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
         extent_invalidatepage(tree, page, offset);
         btree_releasepage(page, GFP_NOFS);
         if (PagePrivate(page)) {
-                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
+                printk("warning page private not zero on page %Lu\n",
+                       page_offset(page));
                 ClearPagePrivate(page);
                 set_page_private(page, 0);
                 page_cache_release(page);
@@ -1456,7 +1456,6 @@ fail_tree_root:
         free_extent_buffer(tree_root->node);
 fail_sys_array:
 fail_sb_buffer:
-        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
         btrfs_stop_workers(&fs_info->fixup_workers);
         btrfs_stop_workers(&fs_info->workers);
         btrfs_stop_workers(&fs_info->endio_workers);
@@ -1705,13 +1704,6 @@ int close_ctree(struct btrfs_root *root)
 
         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
-        extent_io_tree_empty_lru(&fs_info->free_space_cache);
-        extent_io_tree_empty_lru(&fs_info->block_group_cache);
-        extent_io_tree_empty_lru(&fs_info->pinned_extents);
-        extent_io_tree_empty_lru(&fs_info->pending_del);
-        extent_io_tree_empty_lru(&fs_info->extent_ins);
-        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
-
         truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
 
         btrfs_stop_workers(&fs_info->fixup_workers);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d4a63ae7ed1b..32bb4ed3723d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -91,29 +91,16 @@ void extent_io_tree_init(struct extent_io_tree *tree,
                          struct address_space *mapping, gfp_t mask)
 {
         tree->state.rb_node = NULL;
+        tree->buffer.rb_node = NULL;
         tree->ops = NULL;
         tree->dirty_bytes = 0;
         spin_lock_init(&tree->lock);
-        spin_lock_init(&tree->lru_lock);
+        spin_lock_init(&tree->buffer_lock);
         tree->mapping = mapping;
-        INIT_LIST_HEAD(&tree->buffer_lru);
-        tree->lru_size = 0;
         tree->last = NULL;
 }
 EXPORT_SYMBOL(extent_io_tree_init);
 
-void extent_io_tree_empty_lru(struct extent_io_tree *tree)
-{
-        struct extent_buffer *eb;
-        while(!list_empty(&tree->buffer_lru)) {
-                eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
-                                lru);
-                list_del_init(&eb->lru);
-                free_extent_buffer(eb);
-        }
-}
-EXPORT_SYMBOL(extent_io_tree_empty_lru);
-
 struct extent_state *alloc_extent_state(gfp_t mask)
 {
         struct extent_state *state;
@@ -245,6 +232,50 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
         return ret;
 }
 
+static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
+                                                u64 offset, struct rb_node *node)
+{
+        struct rb_root *root = &tree->buffer;
+        struct rb_node ** p = &root->rb_node;
+        struct rb_node * parent = NULL;
+        struct extent_buffer *eb;
+
+        while(*p) {
+                parent = *p;
+                eb = rb_entry(parent, struct extent_buffer, rb_node);
+
+                if (offset < eb->start)
+                        p = &(*p)->rb_left;
+                else if (offset > eb->start)
+                        p = &(*p)->rb_right;
+                else
+                        return eb;
+        }
+
+        rb_link_node(node, parent, p);
+        rb_insert_color(node, root);
+        return NULL;
+}
+
+static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
+                                           u64 offset)
+{
+        struct rb_root *root = &tree->buffer;
+        struct rb_node * n = root->rb_node;
+        struct extent_buffer *eb;
+
+        while(n) {
+                eb = rb_entry(n, struct extent_buffer, rb_node);
+                if (offset < eb->start)
+                        n = n->rb_left;
+                else if (offset > eb->start)
+                        n = n->rb_right;
+                else
+                        return eb;
+        }
+        return NULL;
+}
+
 /*
  * utility function to look for merge candidates inside a given range.
  * Any extents with matching state are merged together into a single
@@ -1817,9 +1848,8 @@ void set_page_extent_mapped(struct page *page)
 {
         if (!PagePrivate(page)) {
                 SetPagePrivate(page);
-                WARN_ON(!page->mapping->a_ops->invalidatepage);
-                set_page_private(page, EXTENT_PAGE_PRIVATE);
                 page_cache_get(page);
+                set_page_private(page, EXTENT_PAGE_PRIVATE);
         }
 }
 
@@ -2627,51 +2657,6 @@ out:
         return sector;
 }
 
-static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
-{
-        if (list_empty(&eb->lru)) {
-                extent_buffer_get(eb);
-                list_add(&eb->lru, &tree->buffer_lru);
-                tree->lru_size++;
-                if (tree->lru_size >= BUFFER_LRU_MAX) {
-                        struct extent_buffer *rm;
-                        rm = list_entry(tree->buffer_lru.prev,
-                                        struct extent_buffer, lru);
-                        tree->lru_size--;
-                        list_del_init(&rm->lru);
-                        free_extent_buffer(rm);
-                }
-        } else
-                list_move(&eb->lru, &tree->buffer_lru);
-        return 0;
-}
-static struct extent_buffer *find_lru(struct extent_io_tree *tree,
-                                      u64 start, unsigned long len)
-{
-        struct list_head *lru = &tree->buffer_lru;
-        struct list_head *cur = lru->next;
-        struct extent_buffer *eb;
-
-        if (list_empty(lru))
-                return NULL;
-
-        do {
-                eb = list_entry(cur, struct extent_buffer, lru);
-                if (eb->start == start && eb->len == len) {
-                        extent_buffer_get(eb);
-                        return eb;
-                }
-                cur = cur->next;
-        } while (cur != lru);
-        return NULL;
-}
-
-static inline unsigned long num_extent_pages(u64 start, u64 len)
-{
-        return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-                (start >> PAGE_CACHE_SHIFT);
-}
-
 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
                                               unsigned long i)
 {
@@ -2688,44 +2673,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
         return p;
 }
 
-int release_extent_buffer_tail_pages(struct extent_buffer *eb)
-{
-        unsigned long num_pages = num_extent_pages(eb->start, eb->len);
-        struct page *page;
-        unsigned long i;
-
-        if (num_pages == 1)
-                return 0;
-        for (i = 1; i < num_pages; i++) {
-                page = extent_buffer_page(eb, i);
-                page_cache_release(page);
-        }
-        return 0;
-}
-
-
-int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
-                          unsigned long len)
+static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-        struct list_head *lru = &tree->buffer_lru;
-        struct list_head *cur = lru->next;
-        struct extent_buffer *eb;
-        int found = 0;
-
-        spin_lock(&tree->lru_lock);
-        if (list_empty(lru))
-                goto out;
-
-        do {
-                eb = list_entry(cur, struct extent_buffer, lru);
-                if (eb->start <= start && eb->start + eb->len > start) {
-                        eb->flags &= ~EXTENT_UPTODATE;
-                }
-                cur = cur->next;
-        } while (cur != lru);
-out:
-        spin_unlock(&tree->lru_lock);
-        return found;
+        return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+                (start >> PAGE_CACHE_SHIFT);
 }
 
 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
@@ -2736,15 +2687,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
         struct extent_buffer *eb = NULL;
         unsigned long flags;
 
-        spin_lock(&tree->lru_lock);
-        eb = find_lru(tree, start, len);
-        spin_unlock(&tree->lru_lock);
-        if (eb) {
-                return eb;
-        }
-
         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
-        INIT_LIST_HEAD(&eb->lru);
         eb->start = start;
         eb->len = len;
         spin_lock_irqsave(&leak_lock, flags);
@@ -2773,17 +2716,24 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
         unsigned long i;
         unsigned long index = start >> PAGE_CACHE_SHIFT;
         struct extent_buffer *eb;
+        struct extent_buffer *exists = NULL;
         struct page *p;
         struct address_space *mapping = tree->mapping;
         int uptodate = 1;
 
+        spin_lock(&tree->buffer_lock);
+        eb = buffer_search(tree, start);
+        if (eb) {
+                atomic_inc(&eb->refs);
+                spin_unlock(&tree->buffer_lock);
+                return eb;
+        }
+        spin_unlock(&tree->buffer_lock);
+
         eb = __alloc_extent_buffer(tree, start, len, mask);
         if (!eb)
                 return NULL;
 
-        if (eb->flags & EXTENT_BUFFER_FILLED)
-                goto lru_add;
-
         if (page0) {
                 eb->first_page = page0;
                 i = 1;
@@ -2800,7 +2750,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
                 if (!p) {
                         WARN_ON(1);
-                        goto fail;
+                        goto free_eb;
                 }
                 set_page_extent_mapped(p);
                 mark_page_accessed(p);
@@ -2818,25 +2768,28 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
         eb->flags |= EXTENT_UPTODATE;
         eb->flags |= EXTENT_BUFFER_FILLED;
 
-lru_add:
-        spin_lock(&tree->lru_lock);
-        add_lru(tree, eb);
-        spin_unlock(&tree->lru_lock);
+        spin_lock(&tree->buffer_lock);
+        exists = buffer_tree_insert(tree, start, &eb->rb_node);
+        if (exists) {
+                /* add one reference for the caller */
+                atomic_inc(&exists->refs);
+                spin_unlock(&tree->buffer_lock);
+                goto free_eb;
+        }
+        spin_unlock(&tree->buffer_lock);
+
+        /* add one reference for the tree */
+        atomic_inc(&eb->refs);
         return eb;
 
-fail:
-        spin_lock(&tree->lru_lock);
-        list_del_init(&eb->lru);
-        spin_unlock(&tree->lru_lock);
+free_eb:
         if (!atomic_dec_and_test(&eb->refs))
-                return NULL;
-        for (index = 1; index < i; index++) {
+                return exists;
+        for (index = 1; index < i; index++)
                 page_cache_release(extent_buffer_page(eb, index));
-        }
-        if (i > 0)
-                page_cache_release(extent_buffer_page(eb, 0));
+        page_cache_release(extent_buffer_page(eb, 0));
         __free_extent_buffer(eb);
-        return NULL;
+        return exists;
 }
 EXPORT_SYMBOL(alloc_extent_buffer);
 
@@ -2844,89 +2797,27 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
                          u64 start, unsigned long len,
                          gfp_t mask)
 {
-        unsigned long num_pages = num_extent_pages(start, len);
-        unsigned long i;
-        unsigned long index = start >> PAGE_CACHE_SHIFT;
         struct extent_buffer *eb;
-        struct page *p;
-        struct address_space *mapping = tree->mapping;
-        int uptodate = 1;
 
-        eb = __alloc_extent_buffer(tree, start, len, mask);
-        if (!eb)
-                return NULL;
-
-        if (eb->flags & EXTENT_BUFFER_FILLED)
-                goto lru_add;
-
-        for (i = 0; i < num_pages; i++, index++) {
-                p = find_get_page(mapping, index);
-                if (!p) {
-                        goto fail;
-                }
-                if (TestSetPageLocked(p)) {
-                        page_cache_release(p);
-                        goto fail;
-                }
-
-                set_page_extent_mapped(p);
-                mark_page_accessed(p);
-
-                if (i == 0) {
-                        eb->first_page = p;
-                        set_page_extent_head(p, len);
-                } else {
-                        set_page_private(p, EXTENT_PAGE_PRIVATE);
-                }
-
-                if (!PageUptodate(p))
-                        uptodate = 0;
-                unlock_page(p);
-        }
-        if (uptodate)
-                eb->flags |= EXTENT_UPTODATE;
-        eb->flags |= EXTENT_BUFFER_FILLED;
+        spin_lock(&tree->buffer_lock);
+        eb = buffer_search(tree, start);
+        if (eb)
+                atomic_inc(&eb->refs);
+        spin_unlock(&tree->buffer_lock);
 
-lru_add:
-        spin_lock(&tree->lru_lock);
-        add_lru(tree, eb);
-        spin_unlock(&tree->lru_lock);
         return eb;
-fail:
-        spin_lock(&tree->lru_lock);
-        list_del_init(&eb->lru);
-        spin_unlock(&tree->lru_lock);
-        if (!atomic_dec_and_test(&eb->refs))
-                return NULL;
-        for (index = 1; index < i; index++) {
-                page_cache_release(extent_buffer_page(eb, index));
-        }
-        if (i > 0)
-                page_cache_release(extent_buffer_page(eb, 0));
-        __free_extent_buffer(eb);
-        return NULL;
 }
 EXPORT_SYMBOL(find_extent_buffer);
 
 void free_extent_buffer(struct extent_buffer *eb)
 {
-        unsigned long i;
-        unsigned long num_pages;
-
         if (!eb)
                 return;
 
         if (!atomic_dec_and_test(&eb->refs))
                 return;
 
-        WARN_ON(!list_empty(&eb->lru));
-        num_pages = num_extent_pages(eb->start, eb->len);
-
-        for (i = 1; i < num_pages; i++) {
-                page_cache_release(extent_buffer_page(eb, i));
-        }
-        page_cache_release(extent_buffer_page(eb, 0));
-        __free_extent_buffer(eb);
+        WARN_ON(1);
 }
 EXPORT_SYMBOL(free_extent_buffer);
 
@@ -3583,3 +3474,35 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
         }
 }
 EXPORT_SYMBOL(memmove_extent_buffer);
+
+int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
+{
+        u64 start = page_offset(page);
+        struct extent_buffer *eb;
+        int ret = 1;
+        unsigned long i;
+        unsigned long num_pages;
+
+        spin_lock(&tree->buffer_lock);
+        eb = buffer_search(tree, start);
+        if (!eb)
+                goto out;
+
+        if (atomic_read(&eb->refs) > 1) {
+                ret = 0;
+                goto out;
+        }
+        /* at this point we can safely release the extent buffer */
+        num_pages = num_extent_pages(eb->start, eb->len);
+        for (i = 0; i < num_pages; i++) {
+                struct page *page = extent_buffer_page(eb, i);
+                page_cache_release(page);
+        }
+        rb_erase(&eb->rb_node, &tree->buffer);
+        __free_extent_buffer(eb);
+out:
+        spin_unlock(&tree->buffer_lock);
+        return ret;
+}
+EXPORT_SYMBOL(try_release_extent_buffer);
+
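A note on the extent_io.c hunks above: replacing the buffer LRU with an rb-tree index keyed by eb->start also establishes a two-reference protocol. alloc_extent_buffer() returns holding one reference for the caller plus one for the tree, free_extent_buffer() drops only the caller's reference, and try_release_extent_buffer() tears the buffer down only once the tree's reference is the last one left. The user-space model below is a sketch of that protocol only; the struct, the model_* helper names, and the main() driver are illustrative stand-ins (e.g. a plain int where the kernel uses atomic_t under tree->buffer_lock), not the kernel API.

        /*
         * Illustrative model of the reference-counting protocol the
         * rb-tree index establishes.  Simplified stand-ins throughout.
         */
        #include <assert.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct model_buffer {
                unsigned long start;    /* index key, like eb->start */
                int refs;               /* atomic_t refs in the kernel */
        };

        /* models alloc_extent_buffer(): one ref for the caller, one for the tree */
        static struct model_buffer *model_alloc(unsigned long start)
        {
                struct model_buffer *eb = calloc(1, sizeof(*eb));

                eb->start = start;
                eb->refs = 1;           /* caller's reference, set at allocation */
                eb->refs++;             /* tree's reference, taken after insert */
                return eb;
        }

        /* models free_extent_buffer(): drops the caller's reference only */
        static void model_free(struct model_buffer *eb)
        {
                if (--eb->refs)
                        return;
                /* mirrors the patch: refs must never hit zero while indexed,
                 * which is why the new free_extent_buffer() ends in WARN_ON(1) */
                assert(0);
        }

        /* models try_release_extent_buffer(): frees iff the tree ref is last */
        static int model_try_release(struct model_buffer *eb)
        {
                if (eb->refs > 1)
                        return 0;       /* a caller still holds the buffer */
                free(eb);               /* rb_erase() + __free_extent_buffer() */
                return 1;
        }

        int main(void)
        {
                struct model_buffer *eb = model_alloc(4096);

                assert(model_try_release(eb) == 0);  /* caller ref blocks it */
                model_free(eb);                      /* drop the caller's ref */
                assert(model_try_release(eb) == 1);  /* only the tree ref left */
                printf("refcount protocol holds\n");
                return 0;
        }

This is also why btree_releasepage() above can simply refuse a release (ret == 0) while a buffer is in use: its page reports refs > 1 until every caller has dropped its reference.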
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 23affd27af5e..dd367617d780 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -54,13 +54,12 @@ struct extent_io_ops {
 
 struct extent_io_tree {
         struct rb_root state;
+        struct rb_root buffer;
         struct address_space *mapping;
         u64 dirty_bytes;
         spinlock_t lock;
+        spinlock_t buffer_lock;
         struct extent_io_ops *ops;
-        spinlock_t lru_lock;
-        struct list_head buffer_lru;
-        int lru_size;
         struct extent_state *last;
 };
 
@@ -87,10 +86,10 @@ struct extent_buffer {
         unsigned long map_start;
         unsigned long map_len;
         struct page *first_page;
-        struct list_head lru;
         atomic_t refs;
         int flags;
         struct list_head leak_list;
+        struct rb_node rb_node;
 };
 
 
@@ -112,10 +111,10 @@ typedef struct extent_map *(get_extent_t)(struct inode *inode,
 
 void extent_io_tree_init(struct extent_io_tree *tree,
                          struct address_space *mapping, gfp_t mask);
-void extent_io_tree_empty_lru(struct extent_io_tree *tree);
 int try_release_extent_mapping(struct extent_map_tree *map,
                                struct extent_io_tree *tree, struct page *page,
                                gfp_t mask);
+int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
 int try_release_extent_state(struct extent_map_tree *map,
                              struct extent_io_tree *tree, struct page *page,
                              gfp_t mask);
@@ -241,8 +240,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
                               unsigned long *map_start,
                               unsigned long *map_len, int km);
 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
-int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
-                          unsigned long len);
 int release_extent_buffer_tail_pages(struct extent_buffer *eb);
 int extent_range_uptodate(struct extent_io_tree *tree,
                           u64 start, u64 end);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8fb6dc25e7a5..60852ada658e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2670,7 +2670,6 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
         map = &BTRFS_I(page->mapping->host)->extent_tree;
         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
         if (ret == 1) {
-                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                 ClearPagePrivate(page);
                 set_page_private(page, 0);
                 page_cache_release(page);
@@ -2721,8 +2720,6 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 
         ClearPageChecked(page);
         if (PagePrivate(page)) {
-                invalidate_extent_lru(tree, page_offset(page),
-                                      PAGE_CACHE_SIZE);
                 ClearPagePrivate(page);
                 set_page_private(page, 0);
                 page_cache_release(page);