aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
authorJosef Bacik <josef@redhat.com>2011-01-25 16:30:38 -0500
committerJosef Bacik <josef@redhat.com>2011-03-17 14:21:18 -0400
commit57a45ced94fe48a701361d64230fc16eefa189dd (patch)
tree8f0cfe52ba0b3fb8a6ed3cc5d3e7449369b0b9a5 /fs/btrfs/extent-tree.c
parent4a64001f0047956e283f7ada9843dfc3f3b5d8c8 (diff)
Btrfs: change reserved_extents to an atomic_t
We track delayed allocation per inodes via 2 counters, one is outstanding_extents and reserved_extents. Outstanding_extents is already an atomic_t, but reserved_extents is not and is protected by a spinlock. So convert this to an atomic_t and instead of using a spinlock, use atomic_cmpxchg when releasing delalloc bytes. This makes our inode 72 bytes smaller, and reduces locking overhead (albiet it was minimal to begin with). Thanks, Signed-off-by: Josef Bacik <josef@redhat.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--fs/btrfs/extent-tree.c42
1 file changed, 26 insertions, 16 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b3089b5c2df..27376c97d85f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3996,6 +3996,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3996 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; 3996 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3997 u64 to_reserve; 3997 u64 to_reserve;
3998 int nr_extents; 3998 int nr_extents;
3999 int reserved_extents;
3999 int ret; 4000 int ret;
4000 4001
4001 if (btrfs_transaction_in_commit(root->fs_info)) 4002 if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4004,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4003 4004
4004 num_bytes = ALIGN(num_bytes, root->sectorsize); 4005 num_bytes = ALIGN(num_bytes, root->sectorsize);
4005 4006
4006 spin_lock(&BTRFS_I(inode)->accounting_lock);
4007 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; 4007 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
4008 if (nr_extents > BTRFS_I(inode)->reserved_extents) { 4008 reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4009 nr_extents -= BTRFS_I(inode)->reserved_extents; 4009
4010 if (nr_extents > reserved_extents) {
4011 nr_extents -= reserved_extents;
4010 to_reserve = calc_trans_metadata_size(root, nr_extents); 4012 to_reserve = calc_trans_metadata_size(root, nr_extents);
4011 } else { 4013 } else {
4012 nr_extents = 0; 4014 nr_extents = 0;
4013 to_reserve = 0; 4015 to_reserve = 0;
4014 } 4016 }
4015 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4017
4016 to_reserve += calc_csum_metadata_size(inode, num_bytes); 4018 to_reserve += calc_csum_metadata_size(inode, num_bytes);
4017 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); 4019 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4018 if (ret) 4020 if (ret)
4019 return ret; 4021 return ret;
4020 4022
4021 spin_lock(&BTRFS_I(inode)->accounting_lock); 4023 atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
4022 BTRFS_I(inode)->reserved_extents += nr_extents;
4023 atomic_inc(&BTRFS_I(inode)->outstanding_extents); 4024 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
4024 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4025 4025
4026 block_rsv_add_bytes(block_rsv, to_reserve, 1); 4026 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4027 4027
@@ -4036,20 +4036,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4036 struct btrfs_root *root = BTRFS_I(inode)->root; 4036 struct btrfs_root *root = BTRFS_I(inode)->root;
4037 u64 to_free; 4037 u64 to_free;
4038 int nr_extents; 4038 int nr_extents;
4039 int reserved_extents;
4039 4040
4040 num_bytes = ALIGN(num_bytes, root->sectorsize); 4041 num_bytes = ALIGN(num_bytes, root->sectorsize);
4041 atomic_dec(&BTRFS_I(inode)->outstanding_extents); 4042 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4042 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); 4043 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
4043 4044
4044 spin_lock(&BTRFS_I(inode)->accounting_lock); 4045 reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
4045 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); 4046 do {
4046 if (nr_extents < BTRFS_I(inode)->reserved_extents) { 4047 int old, new;
4047 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; 4048
4048 BTRFS_I(inode)->reserved_extents -= nr_extents; 4049 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4049 } else { 4050 if (nr_extents >= reserved_extents) {
4050 nr_extents = 0; 4051 nr_extents = 0;
4051 } 4052 break;
4052 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4053 }
4054 old = reserved_extents;
4055 nr_extents = reserved_extents - nr_extents;
4056 new = reserved_extents - nr_extents;
4057 old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
4058 reserved_extents, new);
4059 if (likely(old == reserved_extents))
4060 break;
4061 reserved_extents = old;
4062 } while (1);
4053 4063
4054 to_free = calc_csum_metadata_size(inode, num_bytes); 4064 to_free = calc_csum_metadata_size(inode, num_bytes);
4055 if (nr_extents > 0) 4065 if (nr_extents > 0)