author		Chris Mason <chris.mason@oracle.com>	2009-09-02 16:24:52 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-09-11 13:31:05 -0400
commit		890871be854b5f5e43e7ba2475f706209906cc24 (patch)
tree		9d087adf7a28bb910992d07d93ea2a992e394110 /fs/btrfs/compression.c
parent		57fd5a5ff8b48b99e90b22fc143082aba755c6c0 (diff)
Btrfs: switch extent_map to a rw lock
There are two main users of the extent_map tree. The first is regular file inodes, where it is evenly spread between readers and writers. The second is the chunk allocation tree, which maps blocks from logical addresses to physical ones, and it is 99.99% reads. The mapping tree is a point of lock contention during heavy IO workloads, so this commit switches things to a rw lock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
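For context, here is a minimal sketch of the conversion pattern the commit describes, seen from a caller's side. The helper names sketch_lookup()/sketch_insert() are illustrative only and not part of the patch; lookup_extent_mapping() and add_extent_mapping() are the existing extent_map helpers, and the assumption is that extent_map_tree now carries an rwlock_t where it used to carry a spinlock_t. The compression.c hunks below show only the read side of this change.

/*
 * Sketch only (not part of the patch): the locking pattern described
 * above, assuming extent_map_tree's lock is now an rwlock_t. The real
 * definitions live in fs/btrfs/extent_map.[ch].
 */
#include <linux/spinlock.h>	/* rwlock_t, read_lock(), write_lock() */
#include "extent_map.h"

/*
 * Read-mostly side: lookups (the 99.99% case for the chunk tree) can
 * run concurrently instead of serializing on a single spinlock.
 */
static struct extent_map *sketch_lookup(struct extent_map_tree *tree,
					u64 start, u64 len)
{
	struct extent_map *em;

	read_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);
	read_unlock(&tree->lock);
	return em;
}

/* Write side: inserting a mapping still takes the lock exclusively. */
static int sketch_insert(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	write_unlock(&tree->lock);
	return ret;
}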
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--	fs/btrfs/compression.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index de1e2fd32080..78451a58f209 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -507,10 +507,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	 */
 	set_page_extent_mapped(page);
 	lock_extent(tree, last_offset, end, GFP_NOFS);
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, last_offset,
 				   PAGE_CACHE_SIZE);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	if (!em || last_offset < em->start ||
 	    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
@@ -594,11 +594,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	em_tree = &BTRFS_I(inode)->extent_tree;
 
 	/* we need the actual starting offset of this extent in the file */
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree,
 				   page_offset(bio->bi_io_vec->bv_page),
 				   PAGE_CACHE_SIZE);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	compressed_len = em->block_len;
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);