Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	134
1 file changed, 109 insertions, 25 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e086d407f1fa..ebe6b29e6069 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -9,7 +9,6 @@
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/swap.h>
-#include <linux/version.h>
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include "extent_io.h"
@@ -31,7 +30,7 @@ static LIST_HEAD(buffers);
 static LIST_HEAD(states);
 
 #define LEAK_DEBUG 0
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 static DEFINE_SPINLOCK(leak_lock);
 #endif
 
@@ -120,7 +119,7 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 static struct extent_state *alloc_extent_state(gfp_t mask)
 {
 	struct extent_state *state;
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 	unsigned long flags;
 #endif
 
@@ -130,7 +129,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
 	state->state = 0;
 	state->private = 0;
 	state->tree = NULL;
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&state->leak_list, &states);
 	spin_unlock_irqrestore(&leak_lock, flags);
@@ -145,11 +144,11 @@ static void free_extent_state(struct extent_state *state)
 	if (!state)
 		return;
 	if (atomic_dec_and_test(&state->refs)) {
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 		unsigned long flags;
 #endif
 		WARN_ON(state->tree);
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 		spin_lock_irqsave(&leak_lock, flags);
 		list_del(&state->leak_list);
 		spin_unlock_irqrestore(&leak_lock, flags);
@@ -416,8 +415,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 
 	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
 	if (node) {
-		struct extent_state *found;
-		found = rb_entry(node, struct extent_state, rb_node);
 		free_extent_state(prealloc);
 		return -EEXIST;
 	}
@@ -2378,11 +2375,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 	int scanned = 0;
 	int range_whole = 0;
 
-	if (wbc->nonblocking && bdi_write_congested(bdi)) {
-		wbc->encountered_congestion = 1;
-		return 0;
-	}
-
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
@@ -2855,6 +2847,98 @@ out:
 	return sector;
 }
 
+int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+		__u64 start, __u64 len, get_extent_t *get_extent)
+{
+	int ret;
+	u64 off = start;
+	u64 max = start + len;
+	u32 flags = 0;
+	u64 disko = 0;
+	struct extent_map *em = NULL;
+	int end = 0;
+	u64 em_start = 0, em_len = 0;
+	unsigned long emflags;
+	ret = 0;
+
+	if (len == 0)
+		return -EINVAL;
+
+	lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
+		GFP_NOFS);
+	em = get_extent(inode, NULL, 0, off, max - off, 0);
+	if (!em)
+		goto out;
+	if (IS_ERR(em)) {
+		ret = PTR_ERR(em);
+		goto out;
+	}
+	while (!end) {
+		off = em->start + em->len;
+		if (off >= max)
+			end = 1;
+
+		em_start = em->start;
+		em_len = em->len;
+
+		disko = 0;
+		flags = 0;
+
+		switch (em->block_start) {
+		case EXTENT_MAP_LAST_BYTE:
+			end = 1;
+			flags |= FIEMAP_EXTENT_LAST;
+			break;
+		case EXTENT_MAP_HOLE:
+			flags |= FIEMAP_EXTENT_UNWRITTEN;
+			break;
+		case EXTENT_MAP_INLINE:
+			flags |= (FIEMAP_EXTENT_DATA_INLINE |
+				FIEMAP_EXTENT_NOT_ALIGNED);
+			break;
+		case EXTENT_MAP_DELALLOC:
+			flags |= (FIEMAP_EXTENT_DELALLOC |
+				FIEMAP_EXTENT_UNKNOWN);
+			break;
+		default:
+			disko = em->block_start;
+			break;
+		}
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+			flags |= FIEMAP_EXTENT_ENCODED;
+
+		emflags = em->flags;
+		free_extent_map(em);
+		em = NULL;
+
+		if (!end) {
+			em = get_extent(inode, NULL, 0, off, max - off, 0);
+			if (!em)
+				goto out;
+			if (IS_ERR(em)) {
+				ret = PTR_ERR(em);
+				goto out;
+			}
+			emflags = em->flags;
+		}
+		if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
+			flags |= FIEMAP_EXTENT_LAST;
+			end = 1;
+		}
+
+		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+					em_len, flags);
+		if (ret)
+			goto out_free;
+	}
+out_free:
+	free_extent_map(em);
+out:
+	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
+		GFP_NOFS);
+	return ret;
+}
+
 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
 					unsigned long i)
 {
@@ -2892,15 +2976,17 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 					gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 	unsigned long flags;
 #endif
 
 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
 	eb->start = start;
 	eb->len = len;
-	mutex_init(&eb->mutex);
-#ifdef LEAK_DEBUG
+	spin_lock_init(&eb->lock);
+	init_waitqueue_head(&eb->lock_wq);
+
+#if LEAK_DEBUG
 	spin_lock_irqsave(&leak_lock, flags);
 	list_add(&eb->leak_list, &buffers);
 	spin_unlock_irqrestore(&leak_lock, flags);
@@ -2912,7 +2998,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 
 static void __free_extent_buffer(struct extent_buffer *eb)
 {
-#ifdef LEAK_DEBUG
+#if LEAK_DEBUG
 	unsigned long flags;
 	spin_lock_irqsave(&leak_lock, flags);
 	list_del(&eb->leak_list);
@@ -2980,8 +3066,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		unlock_page(p);
 	}
 	if (uptodate)
-		eb->flags |= EXTENT_UPTODATE;
-	eb->flags |= EXTENT_BUFFER_FILLED;
+		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
 	spin_lock(&tree->buffer_lock);
 	exists = buffer_tree_insert(tree, start, &eb->rb_node);
@@ -3135,7 +3220,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	unsigned long num_pages;
 
 	num_pages = num_extent_pages(eb->start, eb->len);
-	eb->flags &= ~EXTENT_UPTODATE;
+	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
 	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
 				GFP_NOFS);
@@ -3206,7 +3291,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 	struct page *page;
 	int pg_uptodate = 1;
 
-	if (eb->flags & EXTENT_UPTODATE)
+	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 1;
 
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3242,7 +3327,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
 
-	if (eb->flags & EXTENT_UPTODATE)
+	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
 	if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
@@ -3273,7 +3358,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	}
 	if (all_uptodate) {
 		if (start_i == 0)
-			eb->flags |= EXTENT_UPTODATE;
+			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 		goto unlock_exit;
 	}
 
@@ -3309,7 +3394,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	}
 
 	if (!ret)
-		eb->flags |= EXTENT_UPTODATE;
+		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 	return ret;
 
 unlock_exit:
@@ -3406,7 +3491,6 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		unmap_extent_buffer(eb, eb->map_token, km);
 		eb->map_token = NULL;
 		save = 1;
-		WARN_ON(!mutex_is_locked(&eb->mutex));
 	}
 	err = map_private_extent_buffer(eb, start, min_len, token, map,
 					map_start, map_len, km);
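
Note on the LEAK_DEBUG hunks above: the macro is defined to 0, yet
"#ifdef LEAK_DEBUG" still evaluates true because the symbol is defined
regardless of its value, so the leak-tracking code was always compiled
in. "#if LEAK_DEBUG" tests the value itself. A minimal standalone
illustration of the difference (everything here is hypothetical except
the LEAK_DEBUG name):

	#include <stdio.h>

	#define LEAK_DEBUG 0

	int main(void)
	{
	#ifdef LEAK_DEBUG
		/* Compiled in even though LEAK_DEBUG is 0: the symbol exists. */
		printf("#ifdef branch is active\n");
	#endif
	#if LEAK_DEBUG
		/* Dropped by the preprocessor while LEAK_DEBUG stays 0. */
		printf("#if branch is active\n");
	#endif
		return 0;
	}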
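The bulk of the insertions is extent_fiemap(), which repeatedly calls
the caller-supplied get_extent callback to walk the inode's extent
maps, translates each extent_map into FIEMAP_EXTENT_* flags, and
reports each extent through fiemap_fill_next_extent(). A filesystem
would typically expose this via its ->fiemap inode operation; a minimal
sketch of such glue, assuming btrfs_get_extent() as the get_extent_t
implementation (this wrapper is illustrative and not part of the diff
shown here):

	/* Hypothetical glue: forward the FIEMAP ioctl to extent_fiemap(),
	 * using btrfs_get_extent() to look up extent mappings.
	 */
	static int btrfs_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo,
				__u64 start, __u64 len)
	{
		return extent_fiemap(inode, fieinfo, start, len,
				     btrfs_get_extent);
	}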