author     Jeff Mahoney <jeffm@suse.com>    2014-05-27 13:53:20 -0400
committer  Chris Mason <clm@fb.com>         2014-06-09 20:21:08 -0400
commit     964930312aec583809a690868119ce716f4ee926
tree       25f6ca23614b5116005d6e6835e567542ae12501
parent     902c68a4da74442f0ab1c0b458f7723a68dfd3b1
btrfs: free delayed node outside of root->inode_lock
On heavy workloads, we're seeing soft lockup warnings on
root->inode_lock in __btrfs_release_delayed_node. The low hanging
fruit is to reduce the size of the critical section.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Reviewed-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Chris Mason <clm@fb.com>
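Both hunks apply the same pattern: decide under root->inode_lock that
the node should be freed, then call kmem_cache_free() only after the
lock is dropped. Below is a minimal userspace sketch of that pattern
(hypothetical names; a pthread mutex stands in for the kernel
spinlock and slab cache), not the btrfs code itself:

/*
 * Sketch: record the decision under the lock, do the free outside it,
 * keeping the critical section as short as possible.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct node {
	int refs;		/* simplified stand-in for an atomic refcount */
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_node(struct node *n)
{
	bool do_free = false;

	pthread_mutex_lock(&tree_lock);
	if (n->refs == 0) {
		/* unlink n from the shared structure here ... */
		do_free = true;	/* ... but do not free under the lock */
	}
	pthread_mutex_unlock(&tree_lock);

	if (do_free)
		free(n);	/* the slow part runs outside the lock */
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	release_node(n);	/* refs == 0, so n is unlinked and freed */
	return 0;
}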
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/delayed-inode.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 33e561a84013..da775bfdebc9 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -149,8 +149,8 @@ again:
 	spin_lock(&root->inode_lock);
 	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 	if (ret == -EEXIST) {
-		kmem_cache_free(delayed_node_cache, node);
 		spin_unlock(&root->inode_lock);
+		kmem_cache_free(delayed_node_cache, node);
 		radix_tree_preload_end();
 		goto again;
 	}
@@ -267,14 +267,17 @@ static void __btrfs_release_delayed_node(
 	mutex_unlock(&delayed_node->mutex);
 
 	if (atomic_dec_and_test(&delayed_node->refs)) {
+		bool free = false;
 		struct btrfs_root *root = delayed_node->root;
 		spin_lock(&root->inode_lock);
 		if (atomic_read(&delayed_node->refs) == 0) {
 			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
-			kmem_cache_free(delayed_node_cache, delayed_node);
+			free = true;
 		}
 		spin_unlock(&root->inode_lock);
+		if (free)
+			kmem_cache_free(delayed_node_cache, delayed_node);
 	}
 }
 