about · summary · refs · log · tree · commit · diff · stats
path: root/fs/btrfs/tree-log.c
diff options
context:
space:
mode:
author	Chris Mason <chris.mason@oracle.com>	2011-07-16 15:23:14 -0400
committer	Chris Mason <chris.mason@oracle.com>	2011-07-27 12:46:46 -0400
commit	bd681513fa6f2ff29aa391f01e413a2d1c59fd77 (patch)
tree	bb10ec6ef876b4d7a553cbe54976ec49a0d10b21 /fs/btrfs/tree-log.c
parent	81317fdeddcef259b6ecf7b5c0d04caa167c6b54 (diff)
Btrfs: switch the btrfs tree locks to reader/writer
The btrfs metadata btree is the source of significant lock contention, especially in the root node. This commit changes our locking to use a reader/writer lock. The lock is built on top of rw spinlocks, and it extends the lock tracking to remember if we have a read lock or a write lock when we go to blocking. Atomics count the number of blocking readers or writers at any given time. It removes all of the adaptive spinning from the old code and uses only the spinning/blocking hints inside of btrfs to decide when it should continue spinning. In read heavy workloads this is dramatically faster. In write heavy workloads we're still faster because of less contention on the root node lock. We suffer slightly in dbench because we schedule more often during write locks, but all other benchmarks so far are improved. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/tree-log.c')
-rw-r--r--	fs/btrfs/tree-log.c	| 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 4ce8a9f41d1e..ac278dd83175 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1730,8 +1730,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 	btrfs_read_buffer(next, ptr_gen);
 
 	btrfs_tree_lock(next);
-	clean_tree_block(trans, root, next);
 	btrfs_set_lock_blocking(next);
+	clean_tree_block(trans, root, next);
 	btrfs_wait_tree_block_writeback(next);
 	btrfs_tree_unlock(next);
 
@@ -1796,8 +1796,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 		next = path->nodes[*level];
 
 		btrfs_tree_lock(next);
-		clean_tree_block(trans, root, next);
 		btrfs_set_lock_blocking(next);
+		clean_tree_block(trans, root, next);
 		btrfs_wait_tree_block_writeback(next);
 		btrfs_tree_unlock(next);
 
@@ -1864,8 +1864,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 		next = path->nodes[orig_level];
 
 		btrfs_tree_lock(next);
-		clean_tree_block(trans, log, next);
 		btrfs_set_lock_blocking(next);
+		clean_tree_block(trans, log, next);
 		btrfs_wait_tree_block_writeback(next);
 		btrfs_tree_unlock(next);
 