about summary refs log tree commit diff stats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-06-25 16:01:30 -0400
committerChris Mason <chris.mason@oracle.com>2008-09-25 11:04:03 -0400
commit333db94cdde9e6dfdedab9290d04d812f83e0922 (patch)
treef4925bc36a8f4177d5a4f3a303ae47f3ea592b60 /fs/btrfs/extent-tree.c
parent5cd57b2cbbb06a350df2698314e4e6a80805fc2f (diff)
Btrfs: Fix snapshot deletion to release the alloc_mutex much more often.
This lowers the impact of snapshot deletion on the rest of the FS. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--fs/btrfs/extent-tree.c28
1 file changed, 20 insertions, 8 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 544fc3f2fe6c..6274f30031db 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1223,8 +1223,8 @@ printk("space info full %Lu\n", flags);
1223 ret = btrfs_make_block_group(trans, extent_root, 0, flags, 1223 ret = btrfs_make_block_group(trans, extent_root, 0, flags,
1224 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes); 1224 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
1225 BUG_ON(ret); 1225 BUG_ON(ret);
1226 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1227out: 1226out:
1227 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1228 return 0; 1228 return 0;
1229} 1229}
1230 1230
@@ -2181,18 +2181,30 @@ static void noinline reada_walk_down(struct btrfs_root *root,
2181 continue; 2181 continue;
2182 } 2182 }
2183 } 2183 }
2184 mutex_unlock(&root->fs_info->alloc_mutex);
2185 ret = readahead_tree_block(root, bytenr, blocksize, 2184 ret = readahead_tree_block(root, bytenr, blocksize,
2186 btrfs_node_ptr_generation(node, i)); 2185 btrfs_node_ptr_generation(node, i));
2187 last = bytenr + blocksize; 2186 last = bytenr + blocksize;
2188 cond_resched(); 2187 cond_resched();
2189 mutex_lock(&root->fs_info->alloc_mutex);
2190 if (ret) 2188 if (ret)
2191 break; 2189 break;
2192 } 2190 }
2193} 2191}
2194 2192
2195/* 2193/*
2194 * we want to avoid as much random IO as we can with the alloc mutex
2195 * held, so drop the lock and do the lookup, then do it again with the
2196 * lock held.
2197 */
2198int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2199 u32 *refs)
2200{
2201 mutex_unlock(&root->fs_info->alloc_mutex);
2202 lookup_extent_ref(NULL, root, start, len, refs);
2203 mutex_lock(&root->fs_info->alloc_mutex);
2204 return lookup_extent_ref(NULL, root, start, len, refs);
2205}
2206
2207/*
2196 * helper function for drop_snapshot, this walks down the tree dropping ref 2208 * helper function for drop_snapshot, this walks down the tree dropping ref
2197 * counts as it goes. 2209 * counts as it goes.
2198 */ 2210 */
@@ -2215,8 +2227,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2215 2227
2216 WARN_ON(*level < 0); 2228 WARN_ON(*level < 0);
2217 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2229 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2218 ret = lookup_extent_ref(trans, root, 2230 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2219 path->nodes[*level]->start,
2220 path->nodes[*level]->len, &refs); 2231 path->nodes[*level]->len, &refs);
2221 BUG_ON(ret); 2232 BUG_ON(ret);
2222 if (refs > 1) 2233 if (refs > 1)
@@ -2245,7 +2256,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2245 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2256 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2246 blocksize = btrfs_level_size(root, *level - 1); 2257 blocksize = btrfs_level_size(root, *level - 1);
2247 2258
2248 ret = lookup_extent_ref(trans, root, bytenr, blocksize, &refs); 2259 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2249 BUG_ON(ret); 2260 BUG_ON(ret);
2250 if (refs != 1) { 2261 if (refs != 1) {
2251 parent = path->nodes[*level]; 2262 parent = path->nodes[*level];
@@ -2261,15 +2272,16 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2261 next = btrfs_find_tree_block(root, bytenr, blocksize); 2272 next = btrfs_find_tree_block(root, bytenr, blocksize);
2262 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) { 2273 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2263 free_extent_buffer(next); 2274 free_extent_buffer(next);
2275 mutex_unlock(&root->fs_info->alloc_mutex);
2276
2264 reada_walk_down(root, cur, path->slots[*level]); 2277 reada_walk_down(root, cur, path->slots[*level]);
2265 2278
2266 mutex_unlock(&root->fs_info->alloc_mutex);
2267 next = read_tree_block(root, bytenr, blocksize, 2279 next = read_tree_block(root, bytenr, blocksize,
2268 ptr_gen); 2280 ptr_gen);
2269 mutex_lock(&root->fs_info->alloc_mutex); 2281 mutex_lock(&root->fs_info->alloc_mutex);
2270 2282
2271 /* we've dropped the lock, double check */ 2283 /* we've dropped the lock, double check */
2272 ret = lookup_extent_ref(trans, root, bytenr, 2284 ret = drop_snap_lookup_refcount(root, bytenr,
2273 blocksize, &refs); 2285 blocksize, &refs);
2274 BUG_ON(ret); 2286 BUG_ON(ret);
2275 if (refs != 1) { 2287 if (refs != 1) {