author    Chris Mason <chris.mason@oracle.com>  2009-09-02 16:24:52 -0400
committer Chris Mason <chris.mason@oracle.com>  2009-09-11 13:31:05 -0400
commit    890871be854b5f5e43e7ba2475f706209906cc24
tree      9d087adf7a28bb910992d07d93ea2a992e394110  /fs/btrfs/extent_map.c
parent    57fd5a5ff8b48b99e90b22fc143082aba755c6c0
Btrfs: switch extent_map to a rw lock
There are two main users of the extent_map tree. The first is regular file inodes, where it is evenly spread between readers and writers. The second is the chunk allocation tree, which maps blocks from logical addresses to physical ones, and it is 99.99% reads.

The mapping tree is a point of lock contention during heavy IO workloads, so this commit switches things to a rw lock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
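For context, here is a minimal sketch of what the new locking discipline looks like from the caller side. This is an illustration only: the real caller conversions happen in other files touched by this commit, and the example_* wrapper names below are invented for this note. lookup_extent_mapping() and add_extent_mapping() are the real helpers from this file, which expect the caller to hold tree->lock.

/*
 * Illustrative sketch, not part of this diff. With tree->lock now an
 * rwlock_t, lookups can run concurrently under read_lock() while
 * insert/remove still serialize under write_lock(). The example_*
 * wrappers are hypothetical.
 */
#include <linux/spinlock.h>	/* rwlock_init(), read_lock(), write_lock() */
#include "extent_map.h"

static struct extent_map *example_lookup(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	struct extent_map *em;

	read_lock(&tree->lock);		/* shared: many readers at once */
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);
	return em;
}

static int example_insert(struct extent_map_tree *tree,
			  struct extent_map *em)
{
	int ret;

	write_lock(&tree->lock);	/* exclusive: writers still serialize */
	ret = add_extent_mapping(tree, em);
	write_unlock(&tree->lock);
	return ret;
}

An rwlock makes the writer side slightly more expensive than a plain spinlock, but with the chunk mapping tree at 99.99% reads, the win from letting lookups proceed in parallel dominates.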
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--	fs/btrfs/extent_map.c	5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 30c9365861e6..72e9fa3c31f5 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -36,7 +36,7 @@ void extent_map_exit(void)
 void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
 {
 	tree->map.rb_node = NULL;
-	spin_lock_init(&tree->lock);
+	rwlock_init(&tree->lock);
 }
 
 /**
@@ -222,7 +222,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
 		ret = -EEXIST;
 		goto out;
 	}
-	assert_spin_locked(&tree->lock);
 	rb = tree_insert(&tree->map, em->start, &em->rb_node);
 	if (rb) {
 		ret = -EEXIST;
@@ -285,7 +284,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 	struct rb_node *next = NULL;
 	u64 end = range_end(start, len);
 
-	assert_spin_locked(&tree->lock);
 	rb_node = __tree_search(&tree->map, start, &prev, &next);
 	if (!rb_node && prev) {
 		em = rb_entry(prev, struct extent_map, rb_node);
@@ -331,7 +329,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 	int ret = 0;
 
 	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
-	assert_spin_locked(&tree->lock);
 	rb_erase(&em->rb_node, &tree->map);
 	em->in_tree = 0;
 	return ret;
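Note that the three assert_spin_locked(&tree->lock) checks are removed outright rather than converted: assert_spin_locked() only works on a spinlock_t, and the rwlock API has no direct single-call counterpart, so enforcement of the locking contract is left to the callers that now take tree->lock for reading or writing.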