Diffstat (limited to 'fs/btrfs/ordered-data.c')
-rw-r--r--	fs/btrfs/ordered-data.c	709
1 file changed, 709 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
new file mode 100644
index 000000000000..da6d43eb41db
--- /dev/null
+++ b/fs/btrfs/ordered-data.c
@@ -0,0 +1,709 @@
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

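/*
 * returns the first byte past the end of 'entry' in the file, clamped
 * to (u64)-1 so that file_offset + len can't wrap past the largest u64
 */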
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

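/*
 * returns NULL if the insertion worked, or it returns the node it found
 * during the search and does not insert
 */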
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

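/*
 * look for a given offset in the tree.  on an exact overlap the matching
 * node is returned; otherwise NULL is returned and *prev_ret points at
 * the nearest ordered extent that ends at or before file_offset (or the
 * first node in the tree if none does)
 */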
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

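/*
 * helper to check if a given offset is inside a given entry
 */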
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

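/*
 * wrapper around __tree_search that first tries the cached tree->last
 * entry.  returns the node covering file_offset, or the closest node
 * __tree_search could find, and caches the result in tree->last
 */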
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/*
 * Allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file.
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree.
 *
 * len is the length of the extent.
 *
 * This also sets the EXTENT_ORDERED bit on the range in the inode.
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, int nocow)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kzalloc(sizeof(*entry), GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	mutex_lock(&tree->mutex);
	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->inode = inode;
	if (nocow)
		set_bit(BTRFS_ORDERED_NOCOW, &entry->flags);

	/* one ref for the tree */
	atomic_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);

	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node) {
		printk("warning dup entry from add_ordered_extent\n");
		BUG();
	}
	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
			   entry_end(entry) - 1, GFP_NOFS);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	BUG_ON(node);
	return 0;
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiple ordered extents.
 */
int btrfs_add_ordered_sum(struct inode *inode,
			  struct btrfs_ordered_extent *entry,
			  struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	list_add_tail(&sum->list, &entry->list);
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0 is returned.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   u64 file_offset, u64 io_size)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
			     GFP_NOFS);
	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	ret = test_range_bit(io_tree, entry->file_offset,
			     entry->file_offset + entry->len - 1,
			     EXTENT_ORDERED, 0);
	if (ret == 0)
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
out:
	mutex_unlock(&tree->mutex);
	return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	if (atomic_dec_and_test(&entry->refs)) {
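		/* last ref is gone: free any pending checksums, then the extent */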
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kfree(entry);
	}
	return 0;
}

/*
 * remove an ordered extent from the tree.  No references are dropped,
 * but anyone waiting on this extent is woken up.
 */
int btrfs_remove_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);

	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

	mutex_unlock(&tree->mutex);
	wake_up(&entry->wait);
	return 0;
}

int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only)
{
	struct list_head splice;
	struct list_head *cur;
	struct list_head *tmp;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);
	list_splice_init(&root->fs_info->ordered_extents, &splice);
	list_for_each_safe(cur, tmp, &splice) {
		cur = splice.next;
		ordered = list_entry(cur, struct btrfs_ordered_extent,
				     root_extent_list);
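		/*
		 * in nocow_only mode, skip extents that aren't marked
		 * NOCOW.  they stay on the local splice list and are
		 * put back on the root's list at the end
		 */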
		if (nocow_only &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
			cond_resched_lock(&root->fs_info->ordered_extent_lock);
			continue;
		}

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);
		inode = ordered->inode;

		/*
		 * the inode can't go away until all the pages are gone
		 * and the pages won't go away while there is still
		 * an ordered extent and the ordered extent won't go
		 * away until it is off this list.  So, we can safely
		 * increment i_count here and call iput later
		 */
		atomic_inc(&inode->i_count);
		spin_unlock(&root->fs_info->ordered_extent_lock);

		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		iput(inode);

		spin_lock(&root->fs_info->ordered_extent_lock);
	}
	list_splice_init(&splice, &root->fs_info->ordered_extents);
	spin_unlock(&root->fs_info->ordered_extent_lock);
	return 0;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for pdflush to find them
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE);
	if (wait)
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	u64 end;
	u64 orig_end;
	u64 wait_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}
	wait_end = orig_end;
again:
	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);

	btrfs_wait_on_page_writeback_range(inode->i_mapping,
					   start >> PAGE_CACHE_SHIFT,
					   orig_end >> PAGE_CACHE_SHIFT);

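	/*
	 * walk from the end of the range back toward start, waiting on
	 * each ordered extent we find.  if the range still has ordered
	 * or delalloc bytes afterward, new IO raced in and we try again
	 */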
	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len < start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
		printk("inode %lu still ordered or delalloc after wait "
		       "%llu %llu\n", inode->i_ino,
		       (unsigned long long)start,
		       (unsigned long long)orig_end);
		goto again;
	}
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
							 u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, file_offset))
		entry = NULL;
	if (entry)
		atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	tree = &BTRFS_I(inode)->ordered_tree;
	mutex_lock(&tree->mutex);
	node = tree_search(tree, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	atomic_inc(&entry->refs);
out:
	mutex_unlock(&tree->mutex);
	return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode,
				struct btrfs_ordered_extent *ordered)
{
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 disk_i_size;
	u64 new_i_size;
	u64 i_size_test;
	struct rb_node *node;
	struct btrfs_ordered_extent *test;

	mutex_lock(&tree->mutex);
	disk_i_size = BTRFS_I(inode)->disk_i_size;

	/*
	 * if the disk i_size is already at the inode->i_size, or
	 * this ordered extent is inside the disk i_size, we're done
	 */
	if (disk_i_size >= inode->i_size ||
	    ordered->file_offset + ordered->len <= disk_i_size) {
		goto out;
	}

	/*
	 * we can't update the disk_i_size if there are delalloc bytes
	 * between disk_i_size and this ordered extent
	 */
	if (test_range_bit(io_tree, disk_i_size,
			   ordered->file_offset + ordered->len - 1,
			   EXTENT_DELALLOC, 0)) {
		goto out;
	}
	/*
	 * walk backward from this ordered extent to disk_i_size.
	 * if we find an ordered extent then we can't update disk i_size
	 * yet
	 */
	node = &ordered->rb_node;
	while (1) {
		node = rb_prev(node);
		if (!node)
			break;
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset + test->len <= disk_i_size)
			break;
		if (test->file_offset >= inode->i_size)
			break;
		if (test->file_offset >= disk_i_size)
			goto out;
	}
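	/*
	 * nothing is pending between disk_i_size and this extent, so
	 * disk_i_size can advance to the end of this extent, clamped
	 * to the in-memory i_size
	 */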
	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));

	/*
	 * at this point, we know we can safely update i_size to at least
	 * the offset from this ordered extent.  But, we need to
	 * walk forward and see if ios from higher up in the file have
	 * finished.
	 */
	node = rb_next(&ordered->rb_node);
	i_size_test = 0;
	if (node) {
		/*
		 * do we have an area where IO might have finished
		 * between our ordered extent and the next one?
		 */
		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		if (test->file_offset > entry_end(ordered))
			i_size_test = test->file_offset;
	} else {
		i_size_test = i_size_read(inode);
	}

	/*
	 * i_size_test is the end of a region after this ordered
	 * extent where there are no ordered extents.  As long as there
	 * are no delalloc bytes in this area, it is safe to update
	 * disk_i_size to the end of the region.
	 */
	if (i_size_test > entry_end(ordered) &&
	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
			    EXTENT_DELALLOC, 0)) {
		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
	}
	BTRFS_I(inode)->disk_i_size = new_i_size;
out:
	mutex_unlock(&tree->mutex);
	return 0;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum)
{
	struct btrfs_ordered_sum *ordered_sum;
	struct btrfs_sector_sum *sector_sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
	struct list_head *cur;
	unsigned long num_sectors;
	unsigned long i;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
	int ret = 1;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	if (!ordered)
		return 1;

	mutex_lock(&tree->mutex);
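	/*
	 * the sums were added with list_add_tail, so walking the list
	 * backwards checks the most recently added ones first.  each
	 * btrfs_ordered_sum covers a run of sectors starting at its
	 * file_offset
	 */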
	list_for_each_prev(cur, &ordered->list) {
		ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list);
		if (offset >= ordered_sum->file_offset) {
			num_sectors = ordered_sum->len / sectorsize;
			sector_sums = ordered_sum->sums;
			for (i = 0; i < num_sectors; i++) {
				if (sector_sums[i].offset == offset) {
					*sum = sector_sums[i].sum;
					ret = 0;
					goto out;
				}
			}
		}
	}
out:
	mutex_unlock(&tree->mutex);
	btrfs_put_ordered_extent(ordered);
	return ret;
}

/**
 * taken from mm/filemap.c because it isn't exported
 *
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
			   loff_t end, int sync_mode)
{
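	/*
	 * nr_to_write below is twice the number of pages in the mapping,
	 * more than the range can have dirty, so the writeback pass
	 * won't stop short on nr_to_write
	 */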
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = start,
		.range_end = end,
		.for_writepages = 1,
	};
	return btrfs_writepages(mapping, &wbc);
}

/**
 * taken from mm/filemap.c because it isn't exported
 *
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}