-rw-r--r--  fs/btrfs/delayed-ref.c | 50
-rw-r--r--  fs/btrfs/delayed-ref.h |  3
-rw-r--r--  fs/btrfs/qgroup.c      | 31
-rw-r--r--  fs/btrfs/qgroup.h      | 17
-rw-r--r--  fs/btrfs/transaction.c |  1
5 files changed, 95 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index fc9563d42693..fd64fd0f011a 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -22,6 +22,7 @@
 #include "ctree.h"
 #include "delayed-ref.h"
 #include "transaction.h"
+#include "qgroup.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@@ -420,12 +421,14 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 static noinline struct btrfs_delayed_ref_head *
 add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                      struct btrfs_trans_handle *trans,
-                     struct btrfs_delayed_ref_node *ref, u64 bytenr,
-                     u64 num_bytes, int action, int is_data)
+                     struct btrfs_delayed_ref_node *ref,
+                     struct btrfs_qgroup_extent_record *qrecord,
+                     u64 bytenr, u64 num_bytes, int action, int is_data)
 {
         struct btrfs_delayed_ref_head *existing;
         struct btrfs_delayed_ref_head *head_ref = NULL;
         struct btrfs_delayed_ref_root *delayed_refs;
+        struct btrfs_qgroup_extent_record *qexisting;
         int count_mod = 1;
         int must_insert_reserved = 0;
 
@@ -474,6 +477,18 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
         head_ref->processing = 0;
         head_ref->total_ref_mod = count_mod;
 
+        /* Record qgroup extent info if provided */
+        if (qrecord) {
+                qrecord->bytenr = bytenr;
+                qrecord->num_bytes = num_bytes;
+                qrecord->old_roots = NULL;
+
+                qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
+                                                             qrecord);
+                if (qexisting)
+                        kfree(qrecord);
+        }
+
         spin_lock_init(&head_ref->lock);
         mutex_init(&head_ref->mutex);
 
@@ -624,6 +639,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_tree_ref *ref;
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
+        struct btrfs_qgroup_extent_record *record = NULL;
 
         if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                 no_quota = 0;
@@ -639,6 +655,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                 return -ENOMEM;
         }
 
+        if (fs_info->quota_enabled && is_fstree(ref_root)) {
+                record = kmalloc(sizeof(*record), GFP_NOFS);
+                if (!record) {
+                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
+                                        head_ref);
+                        return -ENOMEM;
+                }
+        }
+
         head_ref->extent_op = extent_op;
 
         delayed_refs = &trans->transaction->delayed_refs;
@@ -648,7 +673,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
          * insert both the head node and the new ref without dropping
          * the spin lock
          */
-        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                         bytenr, num_bytes, action, 0);
 
         add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -673,6 +698,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_data_ref *ref;
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
+        struct btrfs_qgroup_extent_record *record = NULL;
 
         if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                 no_quota = 0;
@@ -688,6 +714,16 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                 return -ENOMEM;
         }
 
+        if (fs_info->quota_enabled && is_fstree(ref_root)) {
+                record = kmalloc(sizeof(*record), GFP_NOFS);
+                if (!record) {
+                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
+                                        head_ref);
+                        return -ENOMEM;
+                }
+        }
+
         head_ref->extent_op = extent_op;
 
         delayed_refs = &trans->transaction->delayed_refs;
@@ -697,7 +733,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
          * insert both the head node and the new ref without dropping
          * the spin lock
          */
-        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+        head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                         bytenr, num_bytes, action, 1);
 
         add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -725,9 +761,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
         delayed_refs = &trans->transaction->delayed_refs;
         spin_lock(&delayed_refs->lock);
 
-        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                              num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                              extent_op->is_data);
 
         spin_unlock(&delayed_refs->lock);
         return 0;
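
Note on ordering: in both the tree-ref and data-ref paths above, the record is kmalloc'd with GFP_NOFS before the delayed_refs spinlock is taken, and the insert into dirty_extent_root then happens inside add_delayed_ref_head(), which per the "without dropping the spin lock" comments runs with that lock held. A condensed sketch of that pattern, with simplified error handling (this is not the literal kernel code, and the real callers also free ref and head_ref on failure):

/*
 * Condensed sketch of the allocation/locking order the hunks above rely on.
 * GFP_NOFS allocation may sleep, so it must happen before the spinlock;
 * kfree() of a duplicate record under the lock is fine.
 */
struct btrfs_qgroup_extent_record *record = NULL;

if (fs_info->quota_enabled && is_fstree(ref_root)) {
        record = kmalloc(sizeof(*record), GFP_NOFS);
        if (!record)
                return -ENOMEM;         /* real callers also free ref/head_ref */
}

spin_lock(&delayed_refs->lock);
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                                bytenr, num_bytes, action, is_data);
/* ... add the tree or data ref under the same lock ... */
spin_unlock(&delayed_refs->lock);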
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 362ca57cfeb7..4016f963599e 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -148,6 +148,9 @@ struct btrfs_delayed_ref_root {
         /* head ref rbtree */
         struct rb_root href_root;
 
+        /* dirty extent records */
+        struct rb_root dirty_extent_root;
+
         /* this spin lock protects the rbtree and the entries inside */
         spinlock_t lock;
 
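
The new dirty_extent_root sits next to href_root and, judging by the call sites above, is covered by the same delayed_refs->lock described in the comment below it: btrfs_qgroup_insert_dirty_extent() is only called from add_delayed_ref_head(), which runs under that spinlock. A hypothetical debugging guard (not part of this patch) that would document the rule at the top of the insert helper:

/* Hypothetical guard, not in the patch: both rbtrees are only touched
 * with delayed_refs->lock held. */
lockdep_assert_held(&delayed_refs->lock);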
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 2f185eee2387..55465d5d788e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1553,6 +1553,37 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
         return 0;
 }
 
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                                  struct btrfs_qgroup_extent_record *record)
+{
+        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
+        struct rb_node *parent_node = NULL;
+        struct btrfs_qgroup_extent_record *entry;
+        u64 bytenr = record->bytenr;
+
+        while (*p) {
+                parent_node = *p;
+                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
+                                 node);
+                if (bytenr < entry->bytenr)
+                        p = &(*p)->rb_left;
+                else if (bytenr > entry->bytenr)
+                        p = &(*p)->rb_right;
+                else
+                        return entry;
+        }
+
+        rb_link_node(&record->node, parent_node, p);
+        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
+        return NULL;
+}
+
+/*
+ * The easy accounting, if we are adding/removing the only ref for an extent
+ * then this qgroup and all of the parent qgroups get their reference and
+ * exclusive counts adjusted.
+ */
 static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                   struct btrfs_qgroup_operation *oper)
 {
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index c5242aa9a4b2..e58155d0390c 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -19,6 +19,9 @@
 #ifndef __BTRFS_QGROUP__
 #define __BTRFS_QGROUP__
 
+#include "ulist.h"
+#include "delayed-ref.h"
+
 /*
  * A description of the operations, all of these operations only happen when we
  * are adding the 1st reference for that subvolume in the case of adding space
@@ -58,6 +61,17 @@ struct btrfs_qgroup_operation {
         struct list_head list;
 };
 
+/*
+ * Record a dirty extent, and inform qgroup to update quota on it
+ * TODO: Use kmem cache to alloc it.
+ */
+struct btrfs_qgroup_extent_record {
+        struct rb_node node;
+        u64 bytenr;
+        u64 num_bytes;
+        struct ulist *old_roots;
+};
+
 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info);
 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
@@ -84,6 +98,9 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
                             u64 bytenr, u64 num_bytes,
                             enum btrfs_qgroup_operation_type type,
                             int mod_seq);
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                                  struct btrfs_qgroup_extent_record *record);
 int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info);
 void btrfs_remove_qgroup_operation(struct btrfs_trans_handle *trans,
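
The two new includes exist because the record embeds a struct rb_node, points at a struct ulist, and the insert helper takes a struct btrfs_delayed_ref_root. The return convention of that helper mirrors the rbtree insert in qgroup.c: NULL means the record was linked into dirty_extent_root and the tree now owns it, a non-NULL pointer is the already-queued entry for that bytenr and the caller keeps its own copy. A minimal usage sketch, assuming delayed_refs->lock is already held as in add_delayed_ref_head():

struct btrfs_qgroup_extent_record *existing;

existing = btrfs_qgroup_insert_dirty_extent(delayed_refs, record);
if (existing)
        kfree(record);          /* duplicate bytenr: keep the queued entry */
else
        record = NULL;          /* ownership passed to dirty_extent_root */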
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 03a3ec7e31ea..3694d57e759f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -225,6 +225,7 @@ loop:
         cur_trans->dirty_bg_run = 0;
 
         cur_trans->delayed_refs.href_root = RB_ROOT;
+        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
         cur_trans->delayed_refs.num_heads_ready = 0;
         cur_trans->delayed_refs.pending_csums = 0;
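
dirty_extent_root is initialized to RB_ROOT for every new transaction, just like href_root. Any records still linked in the tree when the transaction finishes have to be consumed and freed by the accounting code that builds on this patch; a hypothetical teardown walk (not part of this patch) could look like:

/*
 * Hypothetical cleanup sketch: release every queued record with a
 * post-order walk so nodes can be freed in place.
 */
struct btrfs_qgroup_extent_record *entry, *next;

rbtree_postorder_for_each_entry_safe(entry, next,
                                     &delayed_refs->dirty_extent_root, node) {
        ulist_free(entry->old_roots);   /* ulist_free(NULL) is a no-op */
        kfree(entry);
}
delayed_refs->dirty_extent_root = RB_ROOT;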