aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/volumes.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-09-02 16:24:52 -0400
committerChris Mason <chris.mason@oracle.com>2009-09-11 13:31:05 -0400
commit890871be854b5f5e43e7ba2475f706209906cc24 (patch)
tree9d087adf7a28bb910992d07d93ea2a992e394110 /fs/btrfs/volumes.c
parent57fd5a5ff8b48b99e90b22fc143082aba755c6c0 (diff)
Btrfs: switch extent_map to a rw lock
There are two main users of the extent_map tree. The first is regular file inodes, where it is evenly spread between readers and writers. The second is the chunk allocation tree, which maps blocks from logical addresses to physical ones, and it is 99.99% reads. The mapping tree is a point of lock contention during heavy IO workloads, so this commit switches things to a rw lock. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--fs/btrfs/volumes.c40
1 files changed, 20 insertions, 20 deletions
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a7e53773e743..d2358c06bbd9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1749,9 +1749,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
1749 * step two, delete the device extents and the 1749 * step two, delete the device extents and the
1750 * chunk tree entries 1750 * chunk tree entries
1751 */ 1751 */
1752 spin_lock(&em_tree->lock); 1752 read_lock(&em_tree->lock);
1753 em = lookup_extent_mapping(em_tree, chunk_offset, 1); 1753 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1754 spin_unlock(&em_tree->lock); 1754 read_unlock(&em_tree->lock);
1755 1755
1756 BUG_ON(em->start > chunk_offset || 1756 BUG_ON(em->start > chunk_offset ||
1757 em->start + em->len < chunk_offset); 1757 em->start + em->len < chunk_offset);
@@ -1780,9 +1780,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
1780 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); 1780 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1781 BUG_ON(ret); 1781 BUG_ON(ret);
1782 1782
1783 spin_lock(&em_tree->lock); 1783 write_lock(&em_tree->lock);
1784 remove_extent_mapping(em_tree, em); 1784 remove_extent_mapping(em_tree, em);
1785 spin_unlock(&em_tree->lock); 1785 write_unlock(&em_tree->lock);
1786 1786
1787 kfree(map); 1787 kfree(map);
1788 em->bdev = NULL; 1788 em->bdev = NULL;
@@ -2294,9 +2294,9 @@ again:
2294 em->block_len = em->len; 2294 em->block_len = em->len;
2295 2295
2296 em_tree = &extent_root->fs_info->mapping_tree.map_tree; 2296 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2297 spin_lock(&em_tree->lock); 2297 write_lock(&em_tree->lock);
2298 ret = add_extent_mapping(em_tree, em); 2298 ret = add_extent_mapping(em_tree, em);
2299 spin_unlock(&em_tree->lock); 2299 write_unlock(&em_tree->lock);
2300 BUG_ON(ret); 2300 BUG_ON(ret);
2301 free_extent_map(em); 2301 free_extent_map(em);
2302 2302
@@ -2491,9 +2491,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2491 int readonly = 0; 2491 int readonly = 0;
2492 int i; 2492 int i;
2493 2493
2494 spin_lock(&map_tree->map_tree.lock); 2494 read_lock(&map_tree->map_tree.lock);
2495 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); 2495 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2496 spin_unlock(&map_tree->map_tree.lock); 2496 read_unlock(&map_tree->map_tree.lock);
2497 if (!em) 2497 if (!em)
2498 return 1; 2498 return 1;
2499 2499
@@ -2518,11 +2518,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2518 struct extent_map *em; 2518 struct extent_map *em;
2519 2519
2520 while (1) { 2520 while (1) {
2521 spin_lock(&tree->map_tree.lock); 2521 write_lock(&tree->map_tree.lock);
2522 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); 2522 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2523 if (em) 2523 if (em)
2524 remove_extent_mapping(&tree->map_tree, em); 2524 remove_extent_mapping(&tree->map_tree, em);
2525 spin_unlock(&tree->map_tree.lock); 2525 write_unlock(&tree->map_tree.lock);
2526 if (!em) 2526 if (!em)
2527 break; 2527 break;
2528 kfree(em->bdev); 2528 kfree(em->bdev);
@@ -2540,9 +2540,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2540 struct extent_map_tree *em_tree = &map_tree->map_tree; 2540 struct extent_map_tree *em_tree = &map_tree->map_tree;
2541 int ret; 2541 int ret;
2542 2542
2543 spin_lock(&em_tree->lock); 2543 read_lock(&em_tree->lock);
2544 em = lookup_extent_mapping(em_tree, logical, len); 2544 em = lookup_extent_mapping(em_tree, logical, len);
2545 spin_unlock(&em_tree->lock); 2545 read_unlock(&em_tree->lock);
2546 BUG_ON(!em); 2546 BUG_ON(!em);
2547 2547
2548 BUG_ON(em->start > logical || em->start + em->len < logical); 2548 BUG_ON(em->start > logical || em->start + em->len < logical);
@@ -2604,9 +2604,9 @@ again:
2604 atomic_set(&multi->error, 0); 2604 atomic_set(&multi->error, 0);
2605 } 2605 }
2606 2606
2607 spin_lock(&em_tree->lock); 2607 read_lock(&em_tree->lock);
2608 em = lookup_extent_mapping(em_tree, logical, *length); 2608 em = lookup_extent_mapping(em_tree, logical, *length);
2609 spin_unlock(&em_tree->lock); 2609 read_unlock(&em_tree->lock);
2610 2610
2611 if (!em && unplug_page) 2611 if (!em && unplug_page)
2612 return 0; 2612 return 0;
@@ -2763,9 +2763,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2763 u64 stripe_nr; 2763 u64 stripe_nr;
2764 int i, j, nr = 0; 2764 int i, j, nr = 0;
2765 2765
2766 spin_lock(&em_tree->lock); 2766 read_lock(&em_tree->lock);
2767 em = lookup_extent_mapping(em_tree, chunk_start, 1); 2767 em = lookup_extent_mapping(em_tree, chunk_start, 1);
2768 spin_unlock(&em_tree->lock); 2768 read_unlock(&em_tree->lock);
2769 2769
2770 BUG_ON(!em || em->start != chunk_start); 2770 BUG_ON(!em || em->start != chunk_start);
2771 map = (struct map_lookup *)em->bdev; 2771 map = (struct map_lookup *)em->bdev;
@@ -3053,9 +3053,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3053 logical = key->offset; 3053 logical = key->offset;
3054 length = btrfs_chunk_length(leaf, chunk); 3054 length = btrfs_chunk_length(leaf, chunk);
3055 3055
3056 spin_lock(&map_tree->map_tree.lock); 3056 read_lock(&map_tree->map_tree.lock);
3057 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); 3057 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
3058 spin_unlock(&map_tree->map_tree.lock); 3058 read_unlock(&map_tree->map_tree.lock);
3059 3059
3060 /* already mapped? */ 3060 /* already mapped? */
3061 if (em && em->start <= logical && em->start + em->len > logical) { 3061 if (em && em->start <= logical && em->start + em->len > logical) {
@@ -3114,9 +3114,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
3114 map->stripes[i].dev->in_fs_metadata = 1; 3114 map->stripes[i].dev->in_fs_metadata = 1;
3115 } 3115 }
3116 3116
3117 spin_lock(&map_tree->map_tree.lock); 3117 write_lock(&map_tree->map_tree.lock);
3118 ret = add_extent_mapping(&map_tree->map_tree, em); 3118 ret = add_extent_mapping(&map_tree->map_tree, em);
3119 spin_unlock(&map_tree->map_tree.lock); 3119 write_unlock(&map_tree->map_tree.lock);
3120 BUG_ON(ret); 3120 BUG_ON(ret);
3121 free_extent_map(em); 3121 free_extent_map(em);
3122 3122