author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-30 23:08:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-30 23:08:20 -0500
commit     e7651b819e90da924991d727d3c007200a18670d (patch)
tree       e7a943b5bb56c384972944fd86767a3f079b8a98 /fs/btrfs/extent_io.c
parent     060e8e3b6f8fc0ba97de2276249fbd80fa25b0a2 (diff)
parent     cf93da7bcf450cb4595055d491a0519cb39e68ed (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
"This is a pretty big pull, and most of these changes have been
floating in btrfs-next for a long time. Filipe's properties work is a
cool building block for inheriting attributes like compression down on
a per inode basis.
Jeff Mahoney kicked in code to export filesystem info into sysfs.
Otherwise, lots of performance improvements, cleanups and bug fixes.
Looks like there are still a few other small pending incrementals, but
I wanted to get the bulk of this in first"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (149 commits)
Btrfs: fix spin_unlock in check_ref_cleanup
Btrfs: setup inode location during btrfs_init_inode_locked
Btrfs: don't use ram_bytes for uncompressed inline items
Btrfs: fix btrfs_search_slot_for_read backwards iteration
Btrfs: do not export ulist functions
Btrfs: rework ulist with list+rb_tree
Btrfs: fix memory leaks on walking backrefs failure
Btrfs: fix send file hole detection leading to data corruption
Btrfs: add a reschedule point in btrfs_find_all_roots()
Btrfs: make send's file extent item search more efficient
Btrfs: fix to catch all errors when resolving indirect ref
Btrfs: fix protection between walking backrefs and root deletion
btrfs: fix warning while merging two adjacent extents
Btrfs: fix infinite path build loops in incremental send
btrfs: undo sysfs when open_ctree() fails
Btrfs: fix snprintf usage by send's gen_unique_name
btrfs: fix defrag 32-bit integer overflow
btrfs: sysfs: list the NO_HOLES feature
btrfs: sysfs: don't show reserved incompat feature
btrfs: call permission checks earlier in ioctls and return EPERM
...
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--   fs/btrfs/extent_io.c   228
1 file changed, 139 insertions(+), 89 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bcb6f1b780d6..85bbd01f1271 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -59,7 +59,7 @@ void btrfs_leak_debug_check(void)
 
 	while (!list_empty(&states)) {
 		state = list_entry(states.next, struct extent_state, leak_list);
-		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
 		       "state %lu in tree %p refs %d\n",
 		       state->start, state->end, state->state, state->tree,
 		       atomic_read(&state->refs));
@@ -69,7 +69,7 @@ void btrfs_leak_debug_check(void)
 
 	while (!list_empty(&buffers)) {
 		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
 		       "refs %d\n",
 		       eb->start, eb->len, atomic_read(&eb->refs));
 		list_del(&eb->leak_list);
@@ -77,16 +77,22 @@ void btrfs_leak_debug_check(void)
 	}
 }
 
-#define btrfs_debug_check_extent_io_range(inode, start, end)		\
-	__btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
+#define btrfs_debug_check_extent_io_range(tree, start, end)		\
+	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
 static inline void __btrfs_debug_check_extent_io_range(const char *caller,
-		struct inode *inode, u64 start, u64 end)
+		struct extent_io_tree *tree, u64 start, u64 end)
 {
-	u64 isize = i_size_read(inode);
+	struct inode *inode;
+	u64 isize;
+
+	if (!tree->mapping)
+		return;
 
+	inode = tree->mapping->host;
+	isize = i_size_read(inode);
 	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
 		printk_ratelimited(KERN_DEBUG
-		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+		    "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
 				caller, btrfs_ino(inode), isize, start, end);
 	}
 }
@@ -124,6 +130,8 @@ static noinline void flush_write_bio(void *data);
 static inline struct btrfs_fs_info *
 tree_fs_info(struct extent_io_tree *tree)
 {
+	if (!tree->mapping)
+		return NULL;
 	return btrfs_sb(tree->mapping->host->i_sb);
 }
 
@@ -186,11 +194,9 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
-	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
 	tree->ops = NULL;
 	tree->dirty_bytes = 0;
 	spin_lock_init(&tree->lock);
-	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
 }
 
@@ -224,12 +230,20 @@ void free_extent_state(struct extent_state *state)
 }
 
 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
-				   struct rb_node *node)
+				   struct rb_node *node,
+				   struct rb_node ***p_in,
+				   struct rb_node **parent_in)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
 	struct tree_entry *entry;
 
+	if (p_in && parent_in) {
+		p = *p_in;
+		parent = *parent_in;
+		goto do_insert;
+	}
+
 	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct tree_entry, rb_node);
@@ -242,35 +256,43 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
 			return parent;
 	}
 
+do_insert:
 	rb_link_node(node, parent, p);
 	rb_insert_color(node, root);
 	return NULL;
 }
 
 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 				      struct rb_node **prev_ret,
-				      struct rb_node **next_ret)
+				      struct rb_node **next_ret,
+				      struct rb_node ***p_ret,
+				      struct rb_node **parent_ret)
 {
 	struct rb_root *root = &tree->state;
-	struct rb_node *n = root->rb_node;
+	struct rb_node **n = &root->rb_node;
 	struct rb_node *prev = NULL;
 	struct rb_node *orig_prev = NULL;
 	struct tree_entry *entry;
 	struct tree_entry *prev_entry = NULL;
 
-	while (n) {
-		entry = rb_entry(n, struct tree_entry, rb_node);
-		prev = n;
+	while (*n) {
+		prev = *n;
+		entry = rb_entry(prev, struct tree_entry, rb_node);
 		prev_entry = entry;
 
 		if (offset < entry->start)
-			n = n->rb_left;
+			n = &(*n)->rb_left;
 		else if (offset > entry->end)
-			n = n->rb_right;
+			n = &(*n)->rb_right;
 		else
-			return n;
+			return *n;
 	}
 
+	if (p_ret)
+		*p_ret = n;
+	if (parent_ret)
+		*parent_ret = prev;
+
 	if (prev_ret) {
 		orig_prev = prev;
 		while (prev && offset > prev_entry->end) {
@@ -292,18 +314,27 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
 	return NULL;
 }
 
-static inline struct rb_node *tree_search(struct extent_io_tree *tree,
-					  u64 offset)
+static inline struct rb_node *
+tree_search_for_insert(struct extent_io_tree *tree,
+		       u64 offset,
+		       struct rb_node ***p_ret,
+		       struct rb_node **parent_ret)
 {
 	struct rb_node *prev = NULL;
 	struct rb_node *ret;
 
-	ret = __etree_search(tree, offset, &prev, NULL);
+	ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
 	if (!ret)
 		return prev;
 	return ret;
 }
 
+static inline struct rb_node *tree_search(struct extent_io_tree *tree,
+					  u64 offset)
+{
+	return tree_search_for_insert(tree, offset, NULL, NULL);
+}
+
 static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
 		     struct extent_state *other)
 {
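
The tree_insert()/tree_search_for_insert() changes above implement a standard rbtree optimization: the lookup that precedes an insert already walks down to the leaf slot where the new node belongs, so the child-link pointer and parent are handed back to the caller, and a later insert can jump straight to do_insert instead of walking the tree a second time. A minimal standalone sketch of the same pattern, using the kernel rbtree API with hypothetical item/key names (not the btrfs types):

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node rb_node;
	u64 key;
};

/* Search once; on a miss, remember the link slot and parent where a
 * new node would be attached. */
static struct item *lookup_for_insert(struct rb_root *root, u64 key,
				      struct rb_node ***p_ret,
				      struct rb_node **parent_ret)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct item *entry = rb_entry(*p, struct item, rb_node);

		parent = *p;
		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;	/* already present */
	}
	*p_ret = p;
	*parent_ret = parent;
	return NULL;	/* caller may insert at (*p_ret, *parent_ret) */
}

/* Insert without re-walking the tree: reuse the cached position. */
static void insert_cached(struct rb_root *root, struct item *new,
			  struct rb_node **p, struct rb_node *parent)
{
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
}

This is why __set_extent_bit() and convert_extent_bit() below now call tree_search_for_insert() and pass &p/&parent down to insert_state(): on the miss path the second tree walk is avoided entirely.
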
@@ -385,23 +416,25 @@ static void set_state_bits(struct extent_io_tree *tree,
  */
 static int insert_state(struct extent_io_tree *tree,
 			struct extent_state *state, u64 start, u64 end,
+			struct rb_node ***p,
+			struct rb_node **parent,
 			unsigned long *bits)
 {
 	struct rb_node *node;
 
 	if (end < start)
-		WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
+		WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
 		       end, start);
 	state->start = start;
 	state->end = end;
 
 	set_state_bits(tree, state, bits);
 
-	node = tree_insert(&tree->state, end, &state->rb_node);
+	node = tree_insert(&tree->state, end, &state->rb_node, p, parent);
 	if (node) {
 		struct extent_state *found;
 		found = rb_entry(node, struct extent_state, rb_node);
-		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+		printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
 		       "%llu %llu\n",
 		       found->start, found->end, start, end);
 		return -EEXIST;
@@ -444,7 +477,8 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
 	prealloc->state = orig->state;
 	orig->start = split;
 
-	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
+	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node,
+			   NULL, NULL);
 	if (node) {
 		free_extent_state(prealloc);
 		return -EEXIST;
@@ -542,7 +576,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int err;
 	int clear = 0;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	if (bits & EXTENT_DELALLOC)
 		bits |= EXTENT_NORESERVE;
@@ -702,7 +736,7 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct rb_node *node;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	spin_lock(&tree->lock);
 again:
@@ -783,11 +817,13 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	struct rb_node **p;
+	struct rb_node *parent;
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
@@ -809,14 +845,16 @@ again:
 	 * this search will find all the extents that end after
 	 * our range starts.
 	 */
-	node = tree_search(tree, start);
+	node = tree_search_for_insert(tree, start, &p, &parent);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
 		BUG_ON(!prealloc);
-		err = insert_state(tree, prealloc, start, end, &bits);
+		err = insert_state(tree, prealloc, start, end,
+				   &p, &parent, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 
+		cache_state(prealloc, cached_state);
 		prealloc = NULL;
 		goto out;
 	}
@@ -919,7 +957,7 @@ hit_next:
 		 * the later extent.
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
-				   &bits);
+				   NULL, NULL, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 
@@ -1005,11 +1043,13 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
 	struct rb_node *node;
+	struct rb_node **p;
+	struct rb_node *parent;
 	int err = 0;
 	u64 last_start;
 	u64 last_end;
 
-	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
@@ -1032,17 +1072,19 @@ again:
 	 * this search will find all the extents that end after
 	 * our range starts.
 	 */
-	node = tree_search(tree, start);
+	node = tree_search_for_insert(tree, start, &p, &parent);
 	if (!node) {
 		prealloc = alloc_extent_state_atomic(prealloc);
 		if (!prealloc) {
 			err = -ENOMEM;
 			goto out;
 		}
-		err = insert_state(tree, prealloc, start, end, &bits);
-		prealloc = NULL;
+		err = insert_state(tree, prealloc, start, end,
+				   &p, &parent, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
+		cache_state(prealloc, cached_state);
+		prealloc = NULL;
 		goto out;
 	}
 	state = rb_entry(node, struct extent_state, rb_node);
@@ -1135,7 +1177,7 @@ hit_next:
 		 * the later extent.
 		 */
 		err = insert_state(tree, prealloc, start, this_end,
-				   &bits);
+				   NULL, NULL, &bits);
 		if (err)
 			extent_io_tree_panic(tree, err);
 		cache_state(prealloc, cached_state);
@@ -2012,9 +2054,10 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 		return -EIO;
 	}
 
-	printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
-		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
-		      start, rcu_str_deref(dev->name), sector);
+	printk_ratelimited_in_rcu(KERN_INFO
+			"BTRFS: read error corrected: ino %lu off %llu "
+		      "(dev %s sector %llu)\n", page->mapping->host->i_ino,
+		      start, rcu_str_deref(dev->name), sector);
 
 	bio_put(bio);
 	return 0;
@@ -2156,7 +2199,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 		return -EIO;
 	}
 
-	if (em->start > start || em->start + em->len < start) {
+	if (em->start > start || em->start + em->len <= start) {
 		free_extent_map(em);
 		em = NULL;
 	}
@@ -2333,25 +2376,29 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
 	struct bio_vec *bvec;
-	struct extent_io_tree *tree;
 	u64 start;
 	u64 end;
 	int i;
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page. */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
-			printk("%s page write in btrfs with offset %u and length %u\n",
-			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
-			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
-			       bvec->bv_offset, bvec->bv_len);
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "partial page write in btrfs with offset %u and length %u",
+					bvec->bv_offset, bvec->bv_len);
+			else
+				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "incomplete page write in btrfs with offset %u and "
+				   "length %u",
+					bvec->bv_offset, bvec->bv_len);
+		}
 
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
@@ -2421,11 +2468,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page. */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
-			printk("%s page read in btrfs with offset %u and length %u\n",
-			       bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
-			       ? KERN_ERR "partial" : KERN_INFO "incomplete",
-			       bvec->bv_offset, bvec->bv_len);
+		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "partial page read in btrfs with offset %u and length %u",
+					bvec->bv_offset, bvec->bv_len);
+			else
+				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+				   "incomplete page read in btrfs with offset %u and "
+				   "length %u",
+					bvec->bv_offset, bvec->bv_len);
+		}
 
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
@@ -3281,8 +3334,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 		set_range_writeback(tree, cur, cur + iosize - 1);
 		if (!PageWriteback(page)) {
-			printk(KERN_ERR "btrfs warning page %lu not "
-			       "writeback, cur %llu end %llu\n",
+			btrfs_err(BTRFS_I(inode)->root->fs_info,
+				   "page %lu not writeback, cur %llu end %llu",
 			       page->index, cur, end);
 		}
 
@@ -3438,6 +3491,7 @@ static int write_one_eb(struct extent_buffer *eb,
 			struct extent_page_data *epd)
 {
 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	u64 offset = eb->start;
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
@@ -3455,7 +3509,7 @@ static int write_one_eb(struct extent_buffer *eb,
 
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
-		ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+		ret = submit_extent_page(rw, tree, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags);
@@ -4073,12 +4127,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
-	struct btrfs_file_extent_item *item;
 	int end = 0;
 	u64 em_start = 0;
 	u64 em_len = 0;
 	u64 em_end = 0;
-	unsigned long emflags;
 
 	if (len == 0)
 		return -EINVAL;
@@ -4103,8 +4155,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	}
 	WARN_ON(!ret);
 	path->slots[0]--;
-	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
-			      struct btrfs_file_extent_item);
 	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
 	found_type = btrfs_key_type(&found_key);
 
@@ -4172,7 +4222,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		offset_in_extent = em_start - em->start;
 		em_end = extent_map_end(em);
 		em_len = em_end - em_start;
-		emflags = em->flags;
 		disko = 0;
 		flags = 0;
 
@@ -4324,10 +4373,9 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 	__free_extent_buffer(eb);
 }
 
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
-						   u64 start,
-						   unsigned long len,
-						   gfp_t mask)
+static struct extent_buffer *
+__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
+		      unsigned long len, gfp_t mask)
 {
 	struct extent_buffer *eb = NULL;
 
@@ -4336,7 +4384,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 		return NULL;
 	eb->start = start;
 	eb->len = len;
-	eb->tree = tree;
+	eb->fs_info = fs_info;
 	eb->bflags = 0;
 	rwlock_init(&eb->lock);
 	atomic_set(&eb->write_locks, 0);
@@ -4468,13 +4516,14 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
 	}
 }
 
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 					 u64 start)
 {
 	struct extent_buffer *eb;
 
 	rcu_read_lock();
-	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+	eb = radix_tree_lookup(&fs_info->buffer_radix,
+			       start >> PAGE_CACHE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		mark_extent_buffer_accessed(eb);
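
find_extent_buffer() above, now keyed off btrfs_fs_info instead of a per-tree radix tree, is an instance of the lockless-lookup idiom: readers traverse the radix tree under rcu_read_lock() and take a reference with atomic_inc_not_zero(), which safely fails for an object whose last reference is concurrently being dropped. A sketch of the idiom with hypothetical names (it assumes objects are freed only after an RCU grace period, as the extent buffers are):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct obj {
	atomic_t refs;		/* object freed via RCU when this hits zero */
};

static RADIX_TREE(obj_cache, GFP_ATOMIC);	/* hypothetical shared cache */

static struct obj *obj_find(unsigned long index)
{
	struct obj *o;

	rcu_read_lock();
	o = radix_tree_lookup(&obj_cache, index);
	/* The RCU grace period keeps "o" dereferenceable here even if it
	 * was just removed; atomic_inc_not_zero() rejects objects whose
	 * refcount already reached zero, so a dying object is never
	 * resurrected. */
	if (o && atomic_inc_not_zero(&o->refs)) {
		rcu_read_unlock();
		return o;
	}
	rcu_read_unlock();
	return NULL;
}

Writers, as in alloc_extent_buffer() below, still serialize insertions with a spinlock (fs_info->buffer_lock) around radix_tree_insert().
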
@@ -4485,7 +4534,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 	return NULL;
 }
 
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start, unsigned long len)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
@@ -4494,16 +4543,15 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
-	struct address_space *mapping = tree->mapping;
+	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	int uptodate = 1;
 	int ret;
 
-
-	eb = find_extent_buffer(tree, start);
+	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
 
-	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
+	eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
 
@@ -4558,12 +4606,13 @@ again:
 	if (ret)
 		goto free_eb;
 
-	spin_lock(&tree->buffer_lock);
-	ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
-	spin_unlock(&tree->buffer_lock);
+	spin_lock(&fs_info->buffer_lock);
+	ret = radix_tree_insert(&fs_info->buffer_radix,
+				start >> PAGE_CACHE_SHIFT, eb);
+	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
-		exists = find_extent_buffer(tree, start);
+		exists = find_extent_buffer(fs_info, start);
 		if (exists)
 			goto free_eb;
 		else
@@ -4571,6 +4620,7 @@ again:
 	}
 	/* add one reference for the tree */
 	check_buffer_tree_ref(eb);
+	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
 	/*
 	 * there is a race where release page may have
@@ -4614,17 +4664,17 @@ static int release_extent_buffer(struct extent_buffer *eb)
 {
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
-		if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
-			spin_unlock(&eb->refs_lock);
-		} else {
-			struct extent_io_tree *tree = eb->tree;
+		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
+			struct btrfs_fs_info *fs_info = eb->fs_info;
 
 			spin_unlock(&eb->refs_lock);
 
-			spin_lock(&tree->buffer_lock);
-			radix_tree_delete(&tree->buffer,
-					  eb->start >> PAGE_CACHE_SHIFT);
-			spin_unlock(&tree->buffer_lock);
+			spin_lock(&fs_info->buffer_lock);
+			radix_tree_delete(&fs_info->buffer_radix,
+					  eb->start >> PAGE_CACHE_SHIFT);
+			spin_unlock(&fs_info->buffer_lock);
+		} else {
+			spin_unlock(&eb->refs_lock);
 		}
 
 		/* Should be safe to release our pages at this point */
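
The release path above swaps the EXTENT_BUFFER_DUMMY test for test_and_clear_bit(EXTENT_BUFFER_IN_TREE, ...), which doubles as an ownership handoff: the bit is set exactly once when the buffer goes into fs_info->buffer_radix, and whichever path atomically clears it wins the right to delete the radix-tree entry, so the unlink can never run twice. A minimal sketch of the claim-then-cleanup idiom (hypothetical names, not the btrfs code):

#include <linux/bitops.h>

#define OBJ_IN_TREE	0	/* illustrative flag bit */

static void obj_release(unsigned long *flags)
{
	/* test_and_clear_bit() is atomic: of all racing callers, exactly
	 * one observes the bit as previously set and performs cleanup. */
	if (test_and_clear_bit(OBJ_IN_TREE, flags)) {
		/* winner: unlink the object from the shared structure */
	}
	/* losers, or objects never linked (like dummy buffers): no-op */
}
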
@@ -5103,12 +5153,12 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
 		       "len %lu dst len %lu\n", src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
 		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
@@ -5150,12 +5200,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
 		       "len %lu len %lu\n", src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
 		       "len %lu len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}