author		Chris Mason <chris.mason@oracle.com>	2008-06-25 16:01:30 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:03 -0400
commit		5cd57b2cbbb06a350df2698314e4e6a80805fc2f (patch)
tree		cd20c904dd016ab031af582dadfbd6e04bf4df9e
parent		168fd7d271d9d8e81ff0b03eb08c36d82670c8a9 (diff)
Btrfs: Add a skip_locking parameter to struct path, and make various funcs honor it
Allocations may need to read in block groups from the extent allocation
tree, which will require a tree search and take locks on the extent
allocation tree. But those locks might already be held in other places,
leading to deadlocks.

Since the alloc_mutex serializes everything right now, it is safe to
skip the btree locking while caching block groups. A better fix will be
to either create a recursive lock or find a way to back off existing
locks while caching block groups.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
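As a usage illustration only (not part of this patch), the sketch below shows the caller-side pattern the message describes: set skip_locking on a path whose tree is already serialized by some outer mutex (here, the alloc_mutex), then search as usual. The helper name example_scan_unlocked and the starting key are hypothetical; the real user is cache_block_group() in the diff below. It assumes it lives inside fs/btrfs, where the btrfs_* helpers are visible.

/*
 * Illustrative sketch only: search a tree without taking per-node btree
 * locks, relying on a mutex the caller already holds to serialize access.
 */
static int example_scan_unlocked(struct btrfs_root *root, u64 start)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* btrfs_search_slot() will not call btrfs_tree_lock() on the nodes
	 * it walks, and btrfs_release_path() preserves the flag. */
	path->skip_locking = 1;

	key.objectid = start;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* examine path->nodes[0] / path->slots[0] here; no tree locks are
	 * held, so the outer mutex is the only protection */
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

Because btrfs_release_path() now carries skip_locking across a release, the flag stays in effect if the same path is released and searched again.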
-rw-r--r--	fs/btrfs/ctree.c	32
-rw-r--r--	fs/btrfs/ctree.h	1
-rw-r--r--	fs/btrfs/extent-tree.c	6
3 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b8f7aecf68d7..fd68601b60b1 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -64,6 +64,7 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
 {
 	int i;
 	int keep = p->keep_locks;
+	int skip = p->skip_locking;
 
 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
 		if (!p->nodes[i])
@@ -76,6 +77,7 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
 	}
 	memset(p, 0, sizeof(*p));
 	p->keep_locks = keep;
+	p->skip_locking = skip;
 }
 
 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
@@ -1262,7 +1264,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (ins_len < 0)
 		lowest_unlock = 2;
 again:
-	b = btrfs_lock_root_node(root);
+	if (p->skip_locking)
+		b = btrfs_root_node(root);
+	else
+		b = btrfs_lock_root_node(root);
 
 	while (b) {
 		level = btrfs_header_level(b);
@@ -1282,7 +1287,8 @@ again:
 			WARN_ON(1);
 		level = btrfs_header_level(b);
 		p->nodes[level] = b;
-		p->locks[level] = 1;
+		if (!p->skip_locking)
+			p->locks[level] = 1;
 		ret = check_block(root, p, level);
 		if (ret)
 			return -1;
@@ -1349,7 +1355,8 @@ again:
 				b = read_node_slot(root, b, slot);
 			}
 		}
-		btrfs_tree_lock(b);
+		if (!p->skip_locking)
+			btrfs_tree_lock(b);
 		unlock_up(p, level, lowest_unlock);
 	} else {
 		p->slots[level] = slot;
@@ -1392,13 +1399,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
 			break;
 		t = path->nodes[i];
 		btrfs_set_node_key(t, key, tslot);
-		if (!btrfs_tree_locked(path->nodes[i])) {
-			int ii;
-printk("fixup without lock on level %d\n", btrfs_header_level(path->nodes[i]));
-			for (ii = 0; ii < BTRFS_MAX_LEVEL; ii++) {
-printk("level %d slot %d\n", ii, path->slots[ii]);
-			}
-		}
 		btrfs_mark_buffer_dirty(path->nodes[i]);
 		if (tslot != 0)
 			break;
@@ -3033,8 +3033,10 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 			reada_for_search(root, path, level, slot, 0);
 
 		next = read_node_slot(root, c, slot);
-		WARN_ON(!btrfs_tree_locked(c));
-		btrfs_tree_lock(next);
+		if (!path->skip_locking) {
+			WARN_ON(!btrfs_tree_locked(c));
+			btrfs_tree_lock(next);
+		}
 		break;
 	}
 	path->slots[level] = slot;
@@ -3052,8 +3054,10 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 		if (level == 1 && path->locks[1] && path->reada)
 			reada_for_search(root, path, level, slot, 0);
 		next = read_node_slot(root, next, 0);
-		WARN_ON(!btrfs_tree_locked(path->nodes[level]));
-		btrfs_tree_lock(next);
+		if (!path->skip_locking) {
+			WARN_ON(!btrfs_tree_locked(path->nodes[level]));
+			btrfs_tree_lock(next);
+		}
 	}
 done:
 	unlock_up(path, 0, 1);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9ea12d42741c..e9bbb53eda63 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -335,6 +335,7 @@ struct btrfs_path {
 	int reada;
 	/* keep some upper locks as we walk down */
 	int keep_locks;
+	int skip_locking;
 	int lowest_level;
 };
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0905653dd3fc..544fc3f2fe6c 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -88,6 +88,12 @@ static int cache_block_group(struct btrfs_root *root,
 		return -ENOMEM;
 
 	path->reada = 2;
+	/*
+	 * we get into deadlocks with paths held by callers of this function.
+	 * since the alloc_mutex is protecting things right now, just
+	 * skip the locking here
+	 */
+	path->skip_locking = 1;
 	first_free = block_group->key.objectid;
 	key.objectid = block_group->key.objectid;
 	key.offset = 0;