Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	| 309 ++++++++++++---------------------
1 file changed, 116 insertions(+), 193 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d4a63ae7ed1b..32bb4ed3723d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -91,29 +91,16 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 			 struct address_space *mapping, gfp_t mask)
 {
 	tree->state.rb_node = NULL;
+	tree->buffer.rb_node = NULL;
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	spin_lock_init(&tree->lru_lock);
+	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
-	INIT_LIST_HEAD(&tree->buffer_lru);
-	tree->lru_size = 0;
 	tree->last = NULL;
 }
 EXPORT_SYMBOL(extent_io_tree_init);
 
-void extent_io_tree_empty_lru(struct extent_io_tree *tree)
-{
-	struct extent_buffer *eb;
-	while (!list_empty(&tree->buffer_lru)) {
-		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
-				lru);
-		list_del_init(&eb->lru);
-		free_extent_buffer(eb);
-	}
-}
-EXPORT_SYMBOL(extent_io_tree_empty_lru);
-
 struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
@@ -245,6 +232,50 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
 	return ret;
 }
 
+static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
+						u64 offset, struct rb_node *node)
+{
+	struct rb_root *root = &tree->buffer;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_buffer *eb;
+
+	while (*p) {
+		parent = *p;
+		eb = rb_entry(parent, struct extent_buffer, rb_node);
+
+		if (offset < eb->start)
+			p = &(*p)->rb_left;
+		else if (offset > eb->start)
+			p = &(*p)->rb_right;
+		else
+			return eb;
+	}
+
+	rb_link_node(node, parent, p);
+	rb_insert_color(node, root);
+	return NULL;
+}
+
+static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
+					   u64 offset)
+{
+	struct rb_root *root = &tree->buffer;
+	struct rb_node *n = root->rb_node;
+	struct extent_buffer *eb;
+
+	while (n) {
+		eb = rb_entry(n, struct extent_buffer, rb_node);
+		if (offset < eb->start)
+			n = n->rb_left;
+		else if (offset > eb->start)
+			n = n->rb_right;
+		else
+			return eb;
+	}
+	return NULL;
+}
+
 /*
  * utility function to look for merge candidates inside a given range.
  * Any extents with matching state are merged together into a single
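
The two helpers added above are the heart of the change: extent buffers are now indexed in an rb-tree hung off the extent_io_tree, keyed by eb->start, instead of being kept on a bounded LRU list. Note that buffer_tree_insert() returns the already-indexed buffer on a key collision rather than failing, so a caller can detect and resolve an insertion race in a single tree walk. A minimal userspace sketch of that insert-or-return-existing contract (a plain unbalanced BST standing in for the kernel's lib/rbtree; all names here are hypothetical):

    #include <stdio.h>

    /* stand-in for struct extent_buffer, keyed by its start offset */
    struct buf {
        unsigned long long start;
        struct buf *left, *right;
    };

    /*
     * Walk to the leaf slot for node->start and link the node there. If a
     * buffer with the same start is already indexed, return it and leave
     * the tree untouched, mirroring buffer_tree_insert() above.
     */
    static struct buf *tree_insert(struct buf **root, struct buf *node)
    {
        struct buf **p = root;

        while (*p) {
            if (node->start < (*p)->start)
                p = &(*p)->left;
            else if (node->start > (*p)->start)
                p = &(*p)->right;
            else
                return *p;    /* key collision: caller decides what to do */
        }
        *p = node;
        return NULL;
    }

    int main(void)
    {
        struct buf *root = NULL;
        struct buf a = { .start = 4096 }, b = { .start = 4096 };

        printf("%p\n", (void *)tree_insert(&root, &a)); /* (nil): inserted */
        printf("%d\n", tree_insert(&root, &b) == &a);   /* 1: got existing */
        return 0;
    }
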
@@ -1817,9 +1848,8 @@ void set_page_extent_mapped(struct page *page)
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		set_page_private(page, EXTENT_PAGE_PRIVATE);
 		page_cache_get(page);
+		set_page_private(page, EXTENT_PAGE_PRIVATE);
 	}
 }
 
@@ -2627,51 +2657,6 @@ out:
 	return sector;
 }
 
-static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb)
-{
-	if (list_empty(&eb->lru)) {
-		extent_buffer_get(eb);
-		list_add(&eb->lru, &tree->buffer_lru);
-		tree->lru_size++;
-		if (tree->lru_size >= BUFFER_LRU_MAX) {
-			struct extent_buffer *rm;
-			rm = list_entry(tree->buffer_lru.prev,
-					struct extent_buffer, lru);
-			tree->lru_size--;
-			list_del_init(&rm->lru);
-			free_extent_buffer(rm);
-		}
-	} else
-		list_move(&eb->lru, &tree->buffer_lru);
-	return 0;
-}
-static struct extent_buffer *find_lru(struct extent_io_tree *tree,
-				      u64 start, unsigned long len)
-{
-	struct list_head *lru = &tree->buffer_lru;
-	struct list_head *cur = lru->next;
-	struct extent_buffer *eb;
-
-	if (list_empty(lru))
-		return NULL;
-
-	do {
-		eb = list_entry(cur, struct extent_buffer, lru);
-		if (eb->start == start && eb->len == len) {
-			extent_buffer_get(eb);
-			return eb;
-		}
-		cur = cur->next;
-	} while (cur != lru);
-	return NULL;
-}
-
-static inline unsigned long num_extent_pages(u64 start, u64 len)
-{
-	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
-		(start >> PAGE_CACHE_SHIFT);
-}
-
 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
 					      unsigned long i)
 {
@@ -2688,44 +2673,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
 	return p;
 }
 
-int release_extent_buffer_tail_pages(struct extent_buffer *eb)
-{
-	unsigned long num_pages = num_extent_pages(eb->start, eb->len);
-	struct page *page;
-	unsigned long i;
-
-	if (num_pages == 1)
-		return 0;
-	for (i = 1; i < num_pages; i++) {
-		page = extent_buffer_page(eb, i);
-		page_cache_release(page);
-	}
-	return 0;
-}
-
-
-int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
-			  unsigned long len)
+static inline unsigned long num_extent_pages(u64 start, u64 len)
 {
-	struct list_head *lru = &tree->buffer_lru;
-	struct list_head *cur = lru->next;
-	struct extent_buffer *eb;
-	int found = 0;
-
-	spin_lock(&tree->lru_lock);
-	if (list_empty(lru))
-		goto out;
-
-	do {
-		eb = list_entry(cur, struct extent_buffer, lru);
-		if (eb->start <= start && eb->start + eb->len > start) {
-			eb->flags &= ~EXTENT_UPTODATE;
-		}
-		cur = cur->next;
-	} while (cur != lru);
-out:
-	spin_unlock(&tree->lru_lock);
-	return found;
+	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+		(start >> PAGE_CACHE_SHIFT);
 }
 
 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
@@ -2736,15 +2687,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	struct extent_buffer *eb = NULL;
 	unsigned long flags;
 
-	spin_lock(&tree->lru_lock);
-	eb = find_lru(tree, start, len);
-	spin_unlock(&tree->lru_lock);
-	if (eb) {
-		return eb;
-	}
-
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
-	INIT_LIST_HEAD(&eb->lru);
 	eb->start = start;
 	eb->len = len;
 	spin_lock_irqsave(&leak_lock, flags);
@@ -2773,17 +2716,24 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	unsigned long i;
 	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	struct extent_buffer *eb;
+	struct extent_buffer *exists = NULL;
 	struct page *p;
 	struct address_space *mapping = tree->mapping;
 	int uptodate = 1;
 
+	spin_lock(&tree->buffer_lock);
+	eb = buffer_search(tree, start);
+	if (eb) {
+		atomic_inc(&eb->refs);
+		spin_unlock(&tree->buffer_lock);
+		return eb;
+	}
+	spin_unlock(&tree->buffer_lock);
+
 	eb = __alloc_extent_buffer(tree, start, len, mask);
 	if (!eb)
 		return NULL;
 
-	if (eb->flags & EXTENT_BUFFER_FILLED)
-		goto lru_add;
-
 	if (page0) {
 		eb->first_page = page0;
 		i = 1;
@@ -2800,7 +2750,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
 		if (!p) {
 			WARN_ON(1);
-			goto fail;
+			goto free_eb;
 		}
 		set_page_extent_mapped(p);
 		mark_page_accessed(p);
@@ -2818,25 +2768,28 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		eb->flags |= EXTENT_UPTODATE;
 	eb->flags |= EXTENT_BUFFER_FILLED;
 
-lru_add:
-	spin_lock(&tree->lru_lock);
-	add_lru(tree, eb);
-	spin_unlock(&tree->lru_lock);
+	spin_lock(&tree->buffer_lock);
+	exists = buffer_tree_insert(tree, start, &eb->rb_node);
+	if (exists) {
+		/* add one reference for the caller */
+		atomic_inc(&exists->refs);
+		spin_unlock(&tree->buffer_lock);
+		goto free_eb;
+	}
+	spin_unlock(&tree->buffer_lock);
+
+	/* add one reference for the tree */
+	atomic_inc(&eb->refs);
 	return eb;
 
-fail:
-	spin_lock(&tree->lru_lock);
-	list_del_init(&eb->lru);
-	spin_unlock(&tree->lru_lock);
+free_eb:
 	if (!atomic_dec_and_test(&eb->refs))
-		return NULL;
-	for (index = 1; index < i; index++) {
+		return exists;
+	for (index = 1; index < i; index++)
 		page_cache_release(extent_buffer_page(eb, index));
-	}
-	if (i > 0)
-		page_cache_release(extent_buffer_page(eb, 0));
+	page_cache_release(extent_buffer_page(eb, 0));
 	__free_extent_buffer(eb);
-	return NULL;
+	return exists;
 }
 EXPORT_SYMBOL(alloc_extent_buffer);
 
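
alloc_extent_buffer() now follows a check, allocate, re-check pattern: look up the buffer under buffer_lock, drop the lock for the allocation and page setup, then re-take it for the insert. If buffer_tree_insert() reports that another thread indexed the same range in the meantime, the loser takes a reference on the winner for its caller and unwinds through free_eb, which returns exists instead of NULL. A self-contained userspace sketch of the same control flow (pthread mutex in place of the spinlock, page handling elided, names hypothetical):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* userspace stand-in for struct extent_buffer */
    struct buf {
        unsigned long long start;
        int refs;
        struct buf *left, *right;
    };

    static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct buf *buffer_root;

    /* buffer_search() analogue: plain BST lookup by start offset */
    static struct buf *tree_search(unsigned long long start)
    {
        struct buf *n = buffer_root;

        while (n && n->start != start)
            n = start < n->start ? n->left : n->right;
        return n;
    }

    /* buffer_tree_insert() analogue: returns the existing node on collision */
    static struct buf *tree_insert(struct buf *node)
    {
        struct buf **p = &buffer_root;

        while (*p) {
            if (node->start < (*p)->start)
                p = &(*p)->left;
            else if (node->start > (*p)->start)
                p = &(*p)->right;
            else
                return *p;
        }
        *p = node;
        return NULL;
    }

    /* alloc_extent_buffer() control flow: check, allocate, re-check */
    static struct buf *alloc_buf(unsigned long long start)
    {
        struct buf *eb, *exists;

        pthread_mutex_lock(&buffer_lock);
        eb = tree_search(start);
        if (eb) {                       /* fast path: already indexed */
            eb->refs++;
            pthread_mutex_unlock(&buffer_lock);
            return eb;
        }
        pthread_mutex_unlock(&buffer_lock);

        eb = calloc(1, sizeof(*eb));    /* page setup elided */
        if (!eb)
            return NULL;
        eb->start = start;
        eb->refs = 1;                   /* the caller's reference */

        pthread_mutex_lock(&buffer_lock);
        exists = tree_insert(eb);       /* re-check under the lock */
        if (exists) {
            exists->refs++;             /* reference for the caller */
            pthread_mutex_unlock(&buffer_lock);
            free(eb);                   /* lost the race: discard ours */
            return exists;
        }
        eb->refs++;                     /* one reference for the tree */
        pthread_mutex_unlock(&buffer_lock);
        return eb;
    }

    int main(void)
    {
        struct buf *a = alloc_buf(4096);
        struct buf *b = alloc_buf(4096);

        printf("same buffer: %d, refs: %d\n", a == b, a->refs); /* 1, 3 */
        return 0;
    }

The extra increment on the success path is the tree's own reference; it is what keeps the buffer alive until try_release_extent_buffer() tears it down.
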
@@ -2844,89 +2797,27 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len,
 					  gfp_t mask)
 {
-	unsigned long num_pages = num_extent_pages(start, len);
-	unsigned long i;
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
 	struct extent_buffer *eb;
-	struct page *p;
-	struct address_space *mapping = tree->mapping;
-	int uptodate = 1;
 
-	eb = __alloc_extent_buffer(tree, start, len, mask);
-	if (!eb)
-		return NULL;
-
-	if (eb->flags & EXTENT_BUFFER_FILLED)
-		goto lru_add;
-
-	for (i = 0; i < num_pages; i++, index++) {
-		p = find_get_page(mapping, index);
-		if (!p) {
-			goto fail;
-		}
-		if (TestSetPageLocked(p)) {
-			page_cache_release(p);
-			goto fail;
-		}
-
-		set_page_extent_mapped(p);
-		mark_page_accessed(p);
-
-		if (i == 0) {
-			eb->first_page = p;
-			set_page_extent_head(p, len);
-		} else {
-			set_page_private(p, EXTENT_PAGE_PRIVATE);
-		}
-
-		if (!PageUptodate(p))
-			uptodate = 0;
-		unlock_page(p);
-	}
-	if (uptodate)
-		eb->flags |= EXTENT_UPTODATE;
-	eb->flags |= EXTENT_BUFFER_FILLED;
+	spin_lock(&tree->buffer_lock);
+	eb = buffer_search(tree, start);
+	if (eb)
+		atomic_inc(&eb->refs);
+	spin_unlock(&tree->buffer_lock);
 
-lru_add:
-	spin_lock(&tree->lru_lock);
-	add_lru(tree, eb);
-	spin_unlock(&tree->lru_lock);
 	return eb;
-fail:
-	spin_lock(&tree->lru_lock);
-	list_del_init(&eb->lru);
-	spin_unlock(&tree->lru_lock);
-	if (!atomic_dec_and_test(&eb->refs))
-		return NULL;
-	for (index = 1; index < i; index++) {
-		page_cache_release(extent_buffer_page(eb, index));
-	}
-	if (i > 0)
-		page_cache_release(extent_buffer_page(eb, 0));
-	__free_extent_buffer(eb);
-	return NULL;
 }
 EXPORT_SYMBOL(find_extent_buffer);
 
 void free_extent_buffer(struct extent_buffer *eb)
 {
-	unsigned long i;
-	unsigned long num_pages;
-
 	if (!eb)
 		return;
 
 	if (!atomic_dec_and_test(&eb->refs))
 		return;
 
-	WARN_ON(!list_empty(&eb->lru));
-	num_pages = num_extent_pages(eb->start, eb->len);
-
-	for (i = 1; i < num_pages; i++) {
-		page_cache_release(extent_buffer_page(eb, i));
-	}
-	page_cache_release(extent_buffer_page(eb, 0));
-	__free_extent_buffer(eb);
+	WARN_ON(1);
 }
 EXPORT_SYMBOL(free_extent_buffer);
 
@@ -3583,3 +3474,35 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	}
 }
 EXPORT_SYMBOL(memmove_extent_buffer);
+
+int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
+{
+	u64 start = page_offset(page);
+	struct extent_buffer *eb;
+	int ret = 1;
+	unsigned long i;
+	unsigned long num_pages;
+
+	spin_lock(&tree->buffer_lock);
+	eb = buffer_search(tree, start);
+	if (!eb)
+		goto out;
+
+	if (atomic_read(&eb->refs) > 1) {
+		ret = 0;
+		goto out;
+	}
+	/* at this point we can safely release the extent buffer */
+	num_pages = num_extent_pages(eb->start, eb->len);
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = extent_buffer_page(eb, i);
+		page_cache_release(page);
+	}
+	rb_erase(&eb->rb_node, &tree->buffer);
+	__free_extent_buffer(eb);
+out:
+	spin_unlock(&tree->buffer_lock);
+	return ret;
+}
+EXPORT_SYMBOL(try_release_extent_buffer);
+
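
try_release_extent_buffer() is now the only teardown path: under buffer_lock it refuses to release a buffer that anyone besides the tree still references (refs > 1); otherwise it drops the pages, erases the rb-tree node, and frees the buffer, returning 1 so the caller knows the page can go. The same gate in a runnable userspace sketch (a single-slot index in place of the rb-tree; names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int refs; };   /* stand-in for struct extent_buffer */

    /*
     * Free the buffer only when the tree's reference is the last one,
     * i.e. refs == 1. Returns 1 if released, 0 if still in use, the
     * same contract try_release_extent_buffer() gives its caller.
     */
    static int try_release(struct buf **slot)
    {
        struct buf *eb = *slot;

        if (!eb)
            return 1;       /* nothing indexed at this offset */
        if (eb->refs > 1)
            return 0;       /* a caller still holds a reference */

        *slot = NULL;       /* rb_erase() stand-in: unlink from the index */
        free(eb);           /* page release loop elided */
        return 1;
    }

    int main(void)
    {
        struct buf *slot = malloc(sizeof(*slot));

        slot->refs = 2;                             /* tree + one caller */
        printf("busy: %d\n", try_release(&slot));   /* 0: refused */
        slot->refs = 1;                             /* only the tree is left */
        printf("idle: %d\n", try_release(&slot));   /* 1: freed */
        return 0;
    }
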