aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/disk-io.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-09-02 16:24:52 -0400
committerChris Mason <chris.mason@oracle.com>2009-09-11 13:31:05 -0400
commit890871be854b5f5e43e7ba2475f706209906cc24 (patch)
tree9d087adf7a28bb910992d07d93ea2a992e394110 /fs/btrfs/disk-io.c
parent57fd5a5ff8b48b99e90b22fc143082aba755c6c0 (diff)
Btrfs: switch extent_map to a rw lock
There are two main users of the extent_map tree. The first is regular file inodes, where it is evenly spread between readers and writers. The second is the chunk allocation tree, which maps blocks from logical addresses to physical ones, and it is 99.99% reads. The mapping tree is a point of lock contention during heavy IO workloads, so this commit switches things to a rw lock. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--fs/btrfs/disk-io.c14
1 file changed, 7 insertions, 7 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 20cefc6f22c4..b6cfdd9164e2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -121,15 +121,15 @@ static struct extent_map *btree_get_extent(struct inode *inode,
121 struct extent_map *em; 121 struct extent_map *em;
122 int ret; 122 int ret;
123 123
124 spin_lock(&em_tree->lock); 124 read_lock(&em_tree->lock);
125 em = lookup_extent_mapping(em_tree, start, len); 125 em = lookup_extent_mapping(em_tree, start, len);
126 if (em) { 126 if (em) {
127 em->bdev = 127 em->bdev =
128 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 128 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
129 spin_unlock(&em_tree->lock); 129 read_unlock(&em_tree->lock);
130 goto out; 130 goto out;
131 } 131 }
132 spin_unlock(&em_tree->lock); 132 read_unlock(&em_tree->lock);
133 133
134 em = alloc_extent_map(GFP_NOFS); 134 em = alloc_extent_map(GFP_NOFS);
135 if (!em) { 135 if (!em) {
@@ -142,7 +142,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
142 em->block_start = 0; 142 em->block_start = 0;
143 em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 143 em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
144 144
145 spin_lock(&em_tree->lock); 145 write_lock(&em_tree->lock);
146 ret = add_extent_mapping(em_tree, em); 146 ret = add_extent_mapping(em_tree, em);
147 if (ret == -EEXIST) { 147 if (ret == -EEXIST) {
148 u64 failed_start = em->start; 148 u64 failed_start = em->start;
@@ -161,7 +161,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
161 free_extent_map(em); 161 free_extent_map(em);
162 em = NULL; 162 em = NULL;
163 } 163 }
164 spin_unlock(&em_tree->lock); 164 write_unlock(&em_tree->lock);
165 165
166 if (ret) 166 if (ret)
167 em = ERR_PTR(ret); 167 em = ERR_PTR(ret);
@@ -1323,9 +1323,9 @@ static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1323 offset = page_offset(page); 1323 offset = page_offset(page);
1324 1324
1325 em_tree = &BTRFS_I(inode)->extent_tree; 1325 em_tree = &BTRFS_I(inode)->extent_tree;
1326 spin_lock(&em_tree->lock); 1326 read_lock(&em_tree->lock);
1327 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); 1327 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1328 spin_unlock(&em_tree->lock); 1328 read_unlock(&em_tree->lock);
1329 if (!em) { 1329 if (!em) {
1330 __unplug_io_fn(bdi, page); 1330 __unplug_io_fn(bdi, page);
1331 return; 1331 return;