author    Chris Mason <chris.mason@oracle.com>  2009-09-02 16:24:52 -0400
committer Chris Mason <chris.mason@oracle.com>  2009-09-11 13:31:05 -0400
commit    890871be854b5f5e43e7ba2475f706209906cc24 (patch)
tree      9d087adf7a28bb910992d07d93ea2a992e394110 /fs
parent    57fd5a5ff8b48b99e90b22fc143082aba755c6c0 (diff)
Btrfs: switch extent_map to a rw lock
There are two main users of the extent_map tree. The first is regular file inodes, where it is evenly spread between readers and writers.

The second is the chunk allocation tree, which maps blocks from logical addresses to physical ones, and it is 99.99% reads.

The mapping tree is a point of lock contention during heavy IO workloads, so this commit switches things to a rw lock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
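The conversion follows one pattern throughout the diff: callers of lookup_extent_mapping() now take the tree lock shared, while callers of add_extent_mapping() and remove_extent_mapping() take it exclusive. Below is a minimal standalone sketch of that pattern, not the actual btrfs code: the wrapper functions em_lookup() and em_insert() are invented for illustration, while the struct layout and helper signatures match fs/btrfs/extent_map.h after this patch.

/*
 * Sketch of the new locking pattern; em_lookup()/em_insert() are
 * hypothetical wrappers, not functions added by this commit.
 */
#include <linux/spinlock.h>     /* rwlock_t, read_lock(), write_lock() */
#include "extent_map.h"         /* struct extent_map_tree now embeds an rwlock_t */

static struct extent_map *em_lookup(struct extent_map_tree *tree,
                                    u64 start, u64 len)
{
        struct extent_map *em;

        read_lock(&tree->lock);         /* lookups may now run concurrently */
        em = lookup_extent_mapping(tree, start, len);
        read_unlock(&tree->lock);
        return em;
}

static int em_insert(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret;

        write_lock(&tree->lock);        /* rb-tree changes still need exclusion */
        ret = add_extent_mapping(tree, em);
        write_unlock(&tree->lock);
        return ret;
}

Since the chunk mapping tree is almost entirely lookups, its readers no longer serialize against one another; only the paths that insert or remove mappings (including the cache-dropping paths in file.c and extent_io.c, which may delete the entries they find) take the lock exclusively.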
Diffstat (limited to 'fs')
-rw-r--r--   fs/btrfs/compression.c    8
-rw-r--r--   fs/btrfs/disk-io.c       14
-rw-r--r--   fs/btrfs/extent-tree.c    4
-rw-r--r--   fs/btrfs/extent_io.c      8
-rw-r--r--   fs/btrfs/extent_map.c     5
-rw-r--r--   fs/btrfs/extent_map.h     2
-rw-r--r--   fs/btrfs/file.c           8
-rw-r--r--   fs/btrfs/inode.c         24
-rw-r--r--   fs/btrfs/relocation.c     4
-rw-r--r--   fs/btrfs/volumes.c       40
10 files changed, 57 insertions, 60 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index de1e2fd32080..78451a58f209 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -507,10 +507,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                  */
                 set_page_extent_mapped(page);
                 lock_extent(tree, last_offset, end, GFP_NOFS);
-                spin_lock(&em_tree->lock);
+                read_lock(&em_tree->lock);
                 em = lookup_extent_mapping(em_tree, last_offset,
                                            PAGE_CACHE_SIZE);
-                spin_unlock(&em_tree->lock);
+                read_unlock(&em_tree->lock);

                 if (!em || last_offset < em->start ||
                     (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
@@ -594,11 +594,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         em_tree = &BTRFS_I(inode)->extent_tree;

         /* we need the actual starting offset of this extent in the file */
-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree,
                                    page_offset(bio->bi_io_vec->bv_page),
                                    PAGE_CACHE_SIZE);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         compressed_len = em->block_len;
         cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 20cefc6f22c4..b6cfdd9164e2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -121,15 +121,15 @@ static struct extent_map *btree_get_extent(struct inode *inode,
         struct extent_map *em;
         int ret;

-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, start, len);
         if (em) {
                 em->bdev =
                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-                spin_unlock(&em_tree->lock);
+                read_unlock(&em_tree->lock);
                 goto out;
         }
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         em = alloc_extent_map(GFP_NOFS);
         if (!em) {
@@ -142,7 +142,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
         em->block_start = 0;
         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

-        spin_lock(&em_tree->lock);
+        write_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em);
         if (ret == -EEXIST) {
                 u64 failed_start = em->start;
@@ -161,7 +161,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
                 free_extent_map(em);
                 em = NULL;
         }
-        spin_unlock(&em_tree->lock);
+        write_unlock(&em_tree->lock);

         if (ret)
                 em = ERR_PTR(ret);
@@ -1323,9 +1323,9 @@ static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
         offset = page_offset(page);

         em_tree = &BTRFS_I(inode)->extent_tree;
-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);
         if (!em) {
                 __unplug_io_fn(bdi, page);
                 return;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 72a2b9c28e9f..edd86ae9e149 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5396,9 +5396,9 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
         while (1) {
                 int ret;
-                spin_lock(&em_tree->lock);
+                write_lock(&em_tree->lock);
                 ret = add_extent_mapping(em_tree, em);
-                spin_unlock(&em_tree->lock);
+                write_unlock(&em_tree->lock);
                 if (ret != -EEXIST) {
                         free_extent_map(em);
                         break;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8d7a152a90c6..41cf1b451b41 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2786,15 +2786,15 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                 u64 len;
                 while (start <= end) {
                         len = end - start + 1;
-                        spin_lock(&map->lock);
+                        write_lock(&map->lock);
                         em = lookup_extent_mapping(map, start, len);
                         if (!em || IS_ERR(em)) {
-                                spin_unlock(&map->lock);
+                                write_unlock(&map->lock);
                                 break;
                         }
                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
                             em->start != start) {
-                                spin_unlock(&map->lock);
+                                write_unlock(&map->lock);
                                 free_extent_map(em);
                                 break;
                         }
@@ -2808,7 +2808,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                                 free_extent_map(em);
                         }
                         start = extent_map_end(em);
-                        spin_unlock(&map->lock);
+                        write_unlock(&map->lock);

                         /* once for us */
                         free_extent_map(em);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 30c9365861e6..72e9fa3c31f5 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -36,7 +36,7 @@ void extent_map_exit(void)
 void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
 {
         tree->map.rb_node = NULL;
-        spin_lock_init(&tree->lock);
+        rwlock_init(&tree->lock);
 }

 /**
@@ -222,7 +222,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
                 ret = -EEXIST;
                 goto out;
         }
-        assert_spin_locked(&tree->lock);
         rb = tree_insert(&tree->map, em->start, &em->rb_node);
         if (rb) {
                 ret = -EEXIST;
@@ -285,7 +284,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
         struct rb_node *next = NULL;
         u64 end = range_end(start, len);

-        assert_spin_locked(&tree->lock);
         rb_node = __tree_search(&tree->map, start, &prev, &next);
         if (!rb_node && prev) {
                 em = rb_entry(prev, struct extent_map, rb_node);
@@ -331,7 +329,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
         int ret = 0;

         WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
-        assert_spin_locked(&tree->lock);
         rb_erase(&em->rb_node, &tree->map);
         em->in_tree = 0;
         return ret;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index fb6eeef06bb0..6216dfbcf9be 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -31,7 +31,7 @@ struct extent_map {

 struct extent_map_tree {
         struct rb_root map;
-        spinlock_t lock;
+        rwlock_t lock;
 };

 static inline u64 extent_map_end(struct extent_map *em)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a760d97279ac..8a9c76aecdf3 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -188,15 +188,15 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                 if (!split2)
                         split2 = alloc_extent_map(GFP_NOFS);

-                spin_lock(&em_tree->lock);
+                write_lock(&em_tree->lock);
                 em = lookup_extent_mapping(em_tree, start, len);
                 if (!em) {
-                        spin_unlock(&em_tree->lock);
+                        write_unlock(&em_tree->lock);
                         break;
                 }
                 flags = em->flags;
                 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-                        spin_unlock(&em_tree->lock);
+                        write_unlock(&em_tree->lock);
                         if (em->start <= start &&
                             (!testend || em->start + em->len >= start + len)) {
                                 free_extent_map(em);
@@ -259,7 +259,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                         free_extent_map(split);
                         split = NULL;
                 }
-                spin_unlock(&em_tree->lock);
+                write_unlock(&em_tree->lock);

                 /* once for us */
                 free_extent_map(em);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 04b53b5ebe59..f1df11718618 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -612,9 +612,9 @@ static noinline int submit_compressed_extents(struct inode *inode,
                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

                 while (1) {
-                        spin_lock(&em_tree->lock);
+                        write_lock(&em_tree->lock);
                         ret = add_extent_mapping(em_tree, em);
-                        spin_unlock(&em_tree->lock);
+                        write_unlock(&em_tree->lock);
                         if (ret != -EEXIST) {
                                 free_extent_map(em);
                                 break;
@@ -748,9 +748,9 @@ static noinline int cow_file_range(struct inode *inode,
                 set_bit(EXTENT_FLAG_PINNED, &em->flags);

                 while (1) {
-                        spin_lock(&em_tree->lock);
+                        write_lock(&em_tree->lock);
                         ret = add_extent_mapping(em_tree, em);
-                        spin_unlock(&em_tree->lock);
+                        write_unlock(&em_tree->lock);
                         if (ret != -EEXIST) {
                                 free_extent_map(em);
                                 break;
@@ -1081,9 +1081,9 @@ out_check:
                         em->bdev = root->fs_info->fs_devices->latest_bdev;
                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
                         while (1) {
-                                spin_lock(&em_tree->lock);
+                                write_lock(&em_tree->lock);
                                 ret = add_extent_mapping(em_tree, em);
-                                spin_unlock(&em_tree->lock);
+                                write_unlock(&em_tree->lock);
                                 if (ret != -EEXIST) {
                                         free_extent_map(em);
                                         break;
@@ -1670,13 +1670,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
                 failrec->last_mirror = 0;
                 failrec->bio_flags = 0;

-                spin_lock(&em_tree->lock);
+                read_lock(&em_tree->lock);
                 em = lookup_extent_mapping(em_tree, start, failrec->len);
                 if (em->start > start || em->start + em->len < start) {
                         free_extent_map(em);
                         em = NULL;
                 }
-                spin_unlock(&em_tree->lock);
+                read_unlock(&em_tree->lock);

                 if (!em || IS_ERR(em)) {
                         kfree(failrec);
@@ -4069,11 +4069,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
         int compressed;

 again:
-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, start, len);
         if (em)
                 em->bdev = root->fs_info->fs_devices->latest_bdev;
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         if (em) {
                 if (em->start > start || em->start + em->len <= start)
@@ -4264,7 +4264,7 @@ insert:
         }

         err = 0;
-        spin_lock(&em_tree->lock);
+        write_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em);
         /* it is possible that someone inserted the extent into the tree
          * while we had the lock dropped. It is also possible that
@@ -4304,7 +4304,7 @@ insert:
                         err = 0;
                 }
         }
-        spin_unlock(&em_tree->lock);
+        write_unlock(&em_tree->lock);
 out:
         if (path)
                 btrfs_free_path(path);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c04f7f212602..4adab903fc2b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2646,9 +2646,9 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key)
         lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
         while (1) {
                 int ret;
-                spin_lock(&em_tree->lock);
+                write_lock(&em_tree->lock);
                 ret = add_extent_mapping(em_tree, em);
-                spin_unlock(&em_tree->lock);
+                write_unlock(&em_tree->lock);
                 if (ret != -EEXIST) {
                         free_extent_map(em);
                         break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a7e53773e743..d2358c06bbd9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1749,9 +1749,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
          * step two, delete the device extents and the
          * chunk tree entries
          */
-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         BUG_ON(em->start > chunk_offset ||
                em->start + em->len < chunk_offset);
@@ -1780,9 +1780,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
         BUG_ON(ret);

-        spin_lock(&em_tree->lock);
+        write_lock(&em_tree->lock);
         remove_extent_mapping(em_tree, em);
-        spin_unlock(&em_tree->lock);
+        write_unlock(&em_tree->lock);

         kfree(map);
         em->bdev = NULL;
@@ -2294,9 +2294,9 @@ again:
         em->block_len = em->len;

         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
-        spin_lock(&em_tree->lock);
+        write_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em);
-        spin_unlock(&em_tree->lock);
+        write_unlock(&em_tree->lock);
         BUG_ON(ret);
         free_extent_map(em);

@@ -2491,9 +2491,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
         int readonly = 0;
         int i;

-        spin_lock(&map_tree->map_tree.lock);
+        read_lock(&map_tree->map_tree.lock);
         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
-        spin_unlock(&map_tree->map_tree.lock);
+        read_unlock(&map_tree->map_tree.lock);
         if (!em)
                 return 1;

@@ -2518,11 +2518,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
         struct extent_map *em;

         while (1) {
-                spin_lock(&tree->map_tree.lock);
+                write_lock(&tree->map_tree.lock);
                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
                 if (em)
                         remove_extent_mapping(&tree->map_tree, em);
-                spin_unlock(&tree->map_tree.lock);
+                write_unlock(&tree->map_tree.lock);
                 if (!em)
                         break;
                 kfree(em->bdev);
@@ -2540,9 +2540,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
         struct extent_map_tree *em_tree = &map_tree->map_tree;
         int ret;

-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, logical, len);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);
         BUG_ON(!em);

         BUG_ON(em->start > logical || em->start + em->len < logical);
@@ -2604,9 +2604,9 @@ again:
                 atomic_set(&multi->error, 0);
         }

-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, logical, *length);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         if (!em && unplug_page)
                 return 0;
@@ -2763,9 +2763,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
         u64 stripe_nr;
         int i, j, nr = 0;

-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, chunk_start, 1);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         BUG_ON(!em || em->start != chunk_start);
         map = (struct map_lookup *)em->bdev;
@@ -3053,9 +3053,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
         logical = key->offset;
         length = btrfs_chunk_length(leaf, chunk);

-        spin_lock(&map_tree->map_tree.lock);
+        read_lock(&map_tree->map_tree.lock);
         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
-        spin_unlock(&map_tree->map_tree.lock);
+        read_unlock(&map_tree->map_tree.lock);

         /* already mapped? */
         if (em && em->start <= logical && em->start + em->len > logical) {
@@ -3114,9 +3114,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                 map->stripes[i].dev->in_fs_metadata = 1;
         }

-        spin_lock(&map_tree->map_tree.lock);
+        write_lock(&map_tree->map_tree.lock);
         ret = add_extent_mapping(&map_tree->map_tree, em);
-        spin_unlock(&map_tree->map_tree.lock);
+        write_unlock(&map_tree->map_tree.lock);
         BUG_ON(ret);
         free_extent_map(em);
