path: root/fs/btrfs
author    Chris Mason <chris.mason@oracle.com>  2008-06-25 16:14:04 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:04 -0400
commit    f9efa9c784aa3b801feb367f72c6867d26fb348e (patch)
tree      d9f678310c6c8a4daf2003fa4f6b4164111dbbd7 /fs/btrfs
parent    3f157a2fd2ad731e1ed9964fecdc5f459f04a4a4 (diff)
Btrfs: Reduce contention on the root node
This calls unlock_up sooner in btrfs_search_slot in order to decrease the
amount of work done with the higher-level tree locks held.

Also, it changes btrfs_tree_lock to spin for a bit against the page lock
before scheduling.  This makes a big difference in context switch rate under
highly contended workloads.

Longer term, a better locking structure is needed than the page lock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
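The spin-before-blocking idea from the locking.c change can be sketched outside the kernel as well. The snippet below is an illustrative userspace analogue only: pthread mutexes stand in for the page lock, spin_then_block_lock, SPIN_BUDGET, and cpu_relax_hint are made-up names, and the 512-iteration budget simply mirrors the patch. It is not btrfs code; it just shows the try-lock, bounded spin, then blocking-acquire pattern described above.

/*
 * Illustrative userspace analogue of the spin-then-sleep strategy the
 * patch applies to btrfs_tree_lock(): try the lock, spin briefly on
 * failure, and only fall back to a blocking acquire (and a likely
 * context switch) once the bounded spin is exhausted.  Names and the
 * use of pthreads are assumptions made for this sketch.
 */
#include <pthread.h>

#define SPIN_BUDGET 512

static inline void cpu_relax_hint(void)
{
#if defined(__x86_64__) || defined(__i386__)
	__builtin_ia32_pause();	/* rough stand-in for the kernel's cpu_relax() */
#endif
}

void spin_then_block_lock(pthread_mutex_t *lock)
{
	int i;

	/* Fast path: uncontended acquire, no spinning and no sleep. */
	if (pthread_mutex_trylock(lock) == 0)
		return;

	/* Contended: spin for a bit, hoping the holder releases soon. */
	for (i = 0; i < SPIN_BUDGET; i++) {
		cpu_relax_hint();
		if (pthread_mutex_trylock(lock) == 0)
			return;
	}

	/* Still contended: give up and block, which may schedule us out. */
	pthread_mutex_lock(lock);
}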
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.c    17
-rw-r--r--  fs/btrfs/locking.c  10
2 files changed, 21 insertions, 6 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0cb80f32a9c7..c6759fc1004a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1313,16 +1313,13 @@ again:
 				slot = p->slots[level];
 				BUG_ON(btrfs_header_nritems(b) == 1);
 			}
+			unlock_up(p, level, lowest_unlock);
+
 			/* this is only true while dropping a snapshot */
 			if (level == lowest_level) {
-				unlock_up(p, level, lowest_unlock);
 				break;
 			}
 
-			if (should_reada)
-				reada_for_search(root, p, level, slot,
-						 key->objectid);
-
 			blocknr = btrfs_node_blockptr(b, slot);
 			gen = btrfs_node_ptr_generation(b, slot);
 			blocksize = btrfs_level_size(root, level - 1);
@@ -1340,6 +1337,11 @@ again:
 				btrfs_release_path(NULL, p);
 				if (tmp)
 					free_extent_buffer(tmp);
+				if (should_reada)
+					reada_for_search(root, p,
+							 level, slot,
+							 key->objectid);
+
 				tmp = read_tree_block(root, blocknr,
 						      blocksize, gen);
 				if (tmp)
@@ -1348,12 +1350,15 @@ again:
 			} else {
 				if (tmp)
 					free_extent_buffer(tmp);
+				if (should_reada)
+					reada_for_search(root, p,
+							 level, slot,
+							 key->objectid);
 				b = read_node_slot(root, b, slot);
 			}
 		}
 		if (!p->skip_locking)
 			btrfs_tree_lock(b);
-		unlock_up(p, level, lowest_unlock);
 	} else {
 		p->slots[level] = slot;
 		if (ins_len > 0 && btrfs_leaf_free_space(root, b) <
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 80813a307b4b..058a506a0dd8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -27,6 +27,16 @@
 
 int btrfs_tree_lock(struct extent_buffer *eb)
 {
+	int i;
+
+	if (!TestSetPageLocked(eb->first_page))
+		return 0;
+	for (i = 0; i < 512; i++) {
+		cpu_relax();
+		if (!TestSetPageLocked(eb->first_page))
+			return 0;
+	}
+	cpu_relax();
 	lock_page(eb->first_page);
 	return 0;
 }