Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--  fs/btrfs/delayed-ref.c | 153
1 file changed, 106 insertions(+), 47 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 125cf76fcd08..66e4f29505a3 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -101,6 +101,11 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                 return -1;
         if (ref1->type > ref2->type)
                 return 1;
+        /* merging of sequenced refs is not allowed */
+        if (ref1->seq < ref2->seq)
+                return -1;
+        if (ref1->seq > ref2->seq)
+                return 1;
         if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
             ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
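With the seq fields compared, two delayed refs that differ only in sequence number no longer compare equal, so the insertion path keeps them as separate nodes rather than merging them. A minimal sketch of the resulting ordering, using a trimmed-down node type (demo_ref and demo_comp are illustrative, not btrfs definitions):

/* Illustrative comparator: type first, then seq keeps sequenced refs apart. */
struct demo_ref {
        unsigned int type;
        unsigned long long seq;
};

static int demo_comp(const struct demo_ref *ref1, const struct demo_ref *ref2)
{
        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        /* merging of sequenced refs is not allowed */
        if (ref1->seq < ref2->seq)
                return -1;
        if (ref1->seq > ref2->seq)
                return 1;
        return 0;       /* equal so far; the real code continues with per-type compares */
}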
@@ -150,16 +155,22 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
 
 /*
  * find an head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot
+ * head if it was able to find one, or NULL if nothing was in that spot.
+ * If return_bigger is given, the next bigger entry is returned if no exact
+ * match is found.
  */
 static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                   u64 bytenr,
-                                  struct btrfs_delayed_ref_node **last)
+                                  struct btrfs_delayed_ref_node **last,
+                                  int return_bigger)
 {
-        struct rb_node *n = root->rb_node;
+        struct rb_node *n;
         struct btrfs_delayed_ref_node *entry;
-        int cmp;
+        int cmp = 0;
 
+again:
+        n = root->rb_node;
+        entry = NULL;
         while (n) {
                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                 WARN_ON(!entry->in_tree);
@@ -182,6 +193,19 @@ static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                 else
                         return entry;
         }
+        if (entry && return_bigger) {
+                if (cmp > 0) {
+                        n = rb_next(&entry->rb_node);
+                        if (!n)
+                                n = rb_first(root);
+                        entry = rb_entry(n, struct btrfs_delayed_ref_node,
+                                         rb_node);
+                        bytenr = entry->bytenr;
+                        return_bigger = 0;
+                        goto again;
+                }
+                return entry;
+        }
         return NULL;
 }
 
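The return_bigger mode turns the exact lookup into a search for the next head at or after bytenr, wrapping around to the first node in the tree when nothing bigger exists and then re-running the walk once at the bytenr it found. The same wrap-around idea on a plain sorted array, as an illustrative sketch only (not btrfs API):

/* Illustrative: first index whose key is >= target, wrapping to the
 * smallest key when no bigger one exists, mirroring the
 * rb_next()/rb_first() fallback in find_ref_head(). */
static int demo_find_at_or_after(const unsigned long long *keys, int n,
                                 unsigned long long target)
{
        int i;

        for (i = 0; i < n; i++)
                if (keys[i] >= target)
                        return i;
        return n ? 0 : -1;      /* wrap around; -1 only for an empty set */
}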
@@ -209,6 +233,24 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
         return 0;
 }
 
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                            u64 seq)
+{
+        struct seq_list *elem;
+
+        assert_spin_locked(&delayed_refs->lock);
+        if (list_empty(&delayed_refs->seq_head))
+                return 0;
+
+        elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
+        if (seq >= elem->seq) {
+                pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
+                         seq, elem->seq, delayed_refs);
+                return 1;
+        }
+        return 0;
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                            struct list_head *cluster, u64 start)
 {
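btrfs_check_delayed_seq() is the gate for the new sequence numbers: a delayed ref whose seq is at or above the lowest sequence number registered on seq_head must be held back. A hypothetical caller, with the locking and the defer decision sketched rather than taken from the real delayed-ref processing path:

/* Hypothetical helper: decide whether a ref may be processed now.
 * Only btrfs_check_delayed_seq(), the delayed_refs lock and ref->seq come
 * from this patch; the surrounding shape is an assumption. */
static int demo_may_run_ref(struct btrfs_delayed_ref_root *delayed_refs,
                            struct btrfs_delayed_ref_node *ref)
{
        int blocked;

        spin_lock(&delayed_refs->lock);
        blocked = btrfs_check_delayed_seq(delayed_refs, ref->seq);
        spin_unlock(&delayed_refs->lock);

        return !blocked;        /* caller would requeue the ref when blocked */
}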
@@ -223,20 +265,8 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                 node = rb_first(&delayed_refs->root);
         } else {
                 ref = NULL;
-                find_ref_head(&delayed_refs->root, start, &ref);
+                find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
                 if (ref) {
-                        struct btrfs_delayed_ref_node *tmp;
-
-                        node = rb_prev(&ref->rb_node);
-                        while (node) {
-                                tmp = rb_entry(node,
-                                               struct btrfs_delayed_ref_node,
-                                               rb_node);
-                                if (tmp->bytenr < start)
-                                        break;
-                                ref = tmp;
-                                node = rb_prev(&ref->rb_node);
-                        }
                         node = &ref->rb_node;
                 } else
                         node = rb_first(&delayed_refs->root);
@@ -390,7 +420,8 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
  */
-static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+                                         struct btrfs_trans_handle *trans,
                                          struct btrfs_delayed_ref_node *ref,
                                          u64 bytenr, u64 num_bytes,
                                          int action, int is_data)
@@ -437,6 +468,7 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
         ref->action = 0;
         ref->is_head = 1;
         ref->in_tree = 1;
+        ref->seq = 0;
 
         head_ref = btrfs_delayed_node_to_head(ref);
         head_ref->must_insert_reserved = must_insert_reserved;
@@ -468,14 +500,17 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed tree ref into the rbtree.
  */
-static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                                         struct btrfs_trans_handle *trans,
                                          struct btrfs_delayed_ref_node *ref,
                                          u64 bytenr, u64 num_bytes, u64 parent,
-                                         u64 ref_root, int level, int action)
+                                         u64 ref_root, int level, int action,
+                                         int for_cow)
 {
         struct btrfs_delayed_ref_node *existing;
         struct btrfs_delayed_tree_ref *full_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
+        u64 seq = 0;
 
         if (action == BTRFS_ADD_DELAYED_EXTENT)
                 action = BTRFS_ADD_DELAYED_REF;
@@ -491,14 +526,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
         ref->is_head = 0;
         ref->in_tree = 1;
 
+        if (need_ref_seq(for_cow, ref_root))
+                seq = inc_delayed_seq(delayed_refs);
+        ref->seq = seq;
+
         full_ref = btrfs_delayed_node_to_tree_ref(ref);
-        if (parent) {
-                full_ref->parent = parent;
+        full_ref->parent = parent;
+        full_ref->root = ref_root;
+        if (parent)
                 ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
-        } else {
-                full_ref->root = ref_root;
+        else
                 ref->type = BTRFS_TREE_BLOCK_REF_KEY;
-        }
         full_ref->level = level;
 
         trace_btrfs_delayed_tree_ref(ref, full_ref, action);
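Each non-head ref now records a sequence number whenever need_ref_seq() says one is required (head nodes are explicitly set to seq 0 earlier in this diff). need_ref_seq() and inc_delayed_seq() are declared elsewhere in this series; the sketch below shows only the assumed shape of the counter, under the assumption that btrfs_delayed_ref_root carries the sequence value, and with an illustrative name:

/* Assumed shape only: hand out the next sequence number under the
 * delayed-refs lock. The real inc_delayed_seq() may differ. */
static inline u64 demo_inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
{
        assert_spin_locked(&delayed_refs->lock);
        return ++delayed_refs->seq;     /* assumption: a u64 counter in the ref root */
}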
@@ -522,15 +560,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed data ref into the rbtree.
  */
-static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                                         struct btrfs_trans_handle *trans,
                                          struct btrfs_delayed_ref_node *ref,
                                          u64 bytenr, u64 num_bytes, u64 parent,
                                          u64 ref_root, u64 owner, u64 offset,
-                                         int action)
+                                         int action, int for_cow)
 {
         struct btrfs_delayed_ref_node *existing;
         struct btrfs_delayed_data_ref *full_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
+        u64 seq = 0;
 
         if (action == BTRFS_ADD_DELAYED_EXTENT)
                 action = BTRFS_ADD_DELAYED_REF;
@@ -546,14 +586,18 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
         ref->is_head = 0;
         ref->in_tree = 1;
 
+        if (need_ref_seq(for_cow, ref_root))
+                seq = inc_delayed_seq(delayed_refs);
+        ref->seq = seq;
+
         full_ref = btrfs_delayed_node_to_data_ref(ref);
-        if (parent) {
-                full_ref->parent = parent;
+        full_ref->parent = parent;
+        full_ref->root = ref_root;
+        if (parent)
                 ref->type = BTRFS_SHARED_DATA_REF_KEY;
-        } else {
-                full_ref->root = ref_root;
+        else
                 ref->type = BTRFS_EXTENT_DATA_REF_KEY;
-        }
+
         full_ref->objectid = owner;
         full_ref->offset = offset;
 
@@ -580,10 +624,12 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
  * to make sure the delayed ref is eventually processed before this
  * transaction commits.
  */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 ref_root, int level, int action,
-                               struct btrfs_delayed_extent_op *extent_op)
+                               struct btrfs_delayed_extent_op *extent_op,
+                               int for_cow)
 {
         struct btrfs_delayed_tree_ref *ref;
         struct btrfs_delayed_ref_head *head_ref;
@@ -610,13 +656,17 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
          * insert both the head node and the new ref without dropping
          * the spin lock
          */
-        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                   action, 0);
+        ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                   num_bytes, action, 0);
         BUG_ON(ret);
 
-        ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
-                                   parent, ref_root, level, action);
+        ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+                                   num_bytes, parent, ref_root, level, action,
+                                   for_cow);
         BUG_ON(ret);
+        if (!need_ref_seq(for_cow, ref_root) &&
+            waitqueue_active(&delayed_refs->seq_wait))
+                wake_up(&delayed_refs->seq_wait);
         spin_unlock(&delayed_refs->lock);
         return 0;
 }
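Adding a ref that does not carry a sequence number also wakes anything sleeping on seq_wait, presumably because such a ref can be processed right away. A hypothetical counterpart of that wake_up(): a waiter that sleeps until the set of delayed refs changes (the wait condition and the helper name are assumptions, not the real transaction code):

/* Hypothetical waiter paired with the wake_up() above. num_entries is a
 * pre-existing counter in btrfs_delayed_ref_root; the condition here is
 * only an illustration. */
static void demo_wait_for_ref_change(struct btrfs_delayed_ref_root *delayed_refs,
                                     unsigned long seen_entries)
{
        wait_event(delayed_refs->seq_wait,
                   delayed_refs->num_entries != seen_entries);
}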
@@ -624,11 +674,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
  */
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                u64 parent, u64 ref_root,
                                u64 owner, u64 offset, int action,
-                               struct btrfs_delayed_extent_op *extent_op)
+                               struct btrfs_delayed_extent_op *extent_op,
+                               int for_cow)
 {
         struct btrfs_delayed_data_ref *ref;
         struct btrfs_delayed_ref_head *head_ref;
@@ -655,18 +707,23 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
          * insert both the head node and the new ref without dropping
          * the spin lock
          */
-        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                   action, 1);
+        ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                   num_bytes, action, 1);
         BUG_ON(ret);
 
-        ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
-                                   parent, ref_root, owner, offset, action);
+        ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+                                   num_bytes, parent, ref_root, owner, offset,
+                                   action, for_cow);
         BUG_ON(ret);
+        if (!need_ref_seq(for_cow, ref_root) &&
+            waitqueue_active(&delayed_refs->seq_wait))
+                wake_up(&delayed_refs->seq_wait);
         spin_unlock(&delayed_refs->lock);
         return 0;
 }
 
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+                                struct btrfs_trans_handle *trans,
                                 u64 bytenr, u64 num_bytes,
                                 struct btrfs_delayed_extent_op *extent_op)
 {
@@ -683,11 +740,13 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
         delayed_refs = &trans->transaction->delayed_refs;
         spin_lock(&delayed_refs->lock);
 
-        ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
+        ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                    num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                    extent_op->is_data);
         BUG_ON(ret);
 
+        if (waitqueue_active(&delayed_refs->seq_wait))
+                wake_up(&delayed_refs->seq_wait);
         spin_unlock(&delayed_refs->lock);
         return 0;
 }
@@ -704,7 +763,7 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
         struct btrfs_delayed_ref_root *delayed_refs;
 
         delayed_refs = &trans->transaction->delayed_refs;
-        ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
+        ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
         if (ref)
                 return btrfs_delayed_node_to_head(ref);
         return NULL;
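After this change find_ref_head() serves two lookup styles: btrfs_find_delayed_ref_head() keeps the exact per-bytenr lookup (return_bigger = 0), while btrfs_find_ref_cluster() asks for the next head strictly after its start offset. Two illustrative wrappers, named demo_* to mark them as non-btrfs code, summarizing the call patterns visible in this diff:

/* Illustrative wrappers over the two call sites shown above. */
static struct btrfs_delayed_ref_node *
demo_lookup_exact(struct rb_root *root, u64 bytenr)
{
        /* exact match or NULL, as in btrfs_find_delayed_ref_head() */
        return find_ref_head(root, bytenr, NULL, 0);
}

static struct btrfs_delayed_ref_node *
demo_lookup_after(struct rb_root *root, u64 start)
{
        struct btrfs_delayed_ref_node *ref = NULL;

        /* next head after 'start' (wrapping), as in btrfs_find_ref_cluster() */
        find_ref_head(root, start + 1, &ref, 1);
        return ref;
}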