path: root/fs/btrfs/extent-tree.c
author    Chris Mason <chris.mason@oracle.com>    2008-08-01 15:11:20 -0400
committer Chris Mason <chris.mason@oracle.com>    2008-09-25 11:04:06 -0400
commit    65b51a009e29e64c0951f21ea17fdc66bbb0fbd7 (patch)
tree      800926527fad4c12ca64083816f33be3d716ec13    /fs/btrfs/extent-tree.c
parent    18e35e0ab337ec99c7e03e9ae917745a352c0bb1 (diff)
btrfs_search_slot: reduce lock contention by cowing in two stages
A btree block cow has two parts: the first is to allocate a destination block, and the second is to copy the old block over. The first part needs locks in the extent allocation tree, and may need to do IO. This changeset splits that into a separate function that can be called without any tree locks held. btrfs_search_slot is changed to drop its path and start over if it has to COW a contended block. This often means that many writers will pre-alloc a new destination for the same contended block, but they cache their prealloc for later use on lower levels in the tree.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
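To make the two-stage idea concrete, below is a minimal userspace sketch of the pattern rather than the kernel code: a pthread mutex stands in for the btrfs tree lock, and the names used here (struct path, prealloc_cow_dest, copy_cow_dest, cow_block) are hypothetical. The slow first stage (allocation) runs with no lock held and its result is cached in the path; the cheap second stage (the copy) runs under the lock; a contended attempt preallocates and retries.

/* two_stage_cow.c - build with: cc -pthread -o two_stage_cow two_stage_cow.c
 * Illustrative sketch only; names and structure are not the btrfs code. */
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>
#include <string.h>

#define BLOCKSIZE 4096

/* Stand-in for a locked btree block. */
struct block {
	pthread_mutex_t lock;
	char data[BLOCKSIZE];
};

/* Stand-in for a search path: remembers a preallocated COW destination
 * so a retry (possibly at a lower level) can reuse it. */
struct path {
	struct block *prealloc;
};

/* Stage one: allocate the COW destination.  In the kernel this part may
 * take extent-tree locks and do IO, so it runs with no tree locks held. */
static struct block *prealloc_cow_dest(void)
{
	struct block *dst = calloc(1, sizeof(*dst));

	if (dst)
		pthread_mutex_init(&dst->lock, NULL);
	return dst;
}

/* Stage two: copy the old block into the destination.  Cheap and
 * non-blocking, so it is done while holding the source lock. */
static void copy_cow_dest(struct block *src, struct block *dst)
{
	memcpy(dst->data, src->data, BLOCKSIZE);
}

/* Simplified search-and-cow loop: if the source block is contended, do
 * the slow stage (allocation) lock-free, cache it in the path, and start
 * over with the prealloc ready to go. */
static struct block *cow_block(struct path *p, struct block *src)
{
	struct block *dst;

again:
	if (pthread_mutex_trylock(&src->lock) != 0) {
		if (!p->prealloc)
			p->prealloc = prealloc_cow_dest();
		if (!p->prealloc)
			return NULL;
		sched_yield();		/* let the lock holder finish */
		goto again;
	}

	/* Lock held: use the cached prealloc if we have one. */
	dst = p->prealloc ? p->prealloc : prealloc_cow_dest();
	p->prealloc = NULL;
	if (!dst) {
		pthread_mutex_unlock(&src->lock);
		return NULL;
	}
	copy_cow_dest(src, dst);
	pthread_mutex_unlock(&src->lock);
	return dst;
}

int main(void)
{
	struct block src;
	struct path p = { .prealloc = NULL };
	struct block *copy;
	int ok;

	pthread_mutex_init(&src.lock, NULL);
	memset(src.data, 'x', BLOCKSIZE);
	copy = cow_block(&p, &src);
	ok = copy != NULL;
	free(copy);
	free(p.prealloc);	/* free any cached prealloc (NULL-safe) */
	return ok ? 0 : 1;
}

The point of caching the prealloc in the path is that a writer who loses the race does not throw the allocation away: the patch reuses it on a later pass, often at a lower level of the tree.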
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--    fs/btrfs/extent-tree.c    49
1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 74bcd48a9c43..98a1c0faedae 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2118,6 +2118,15 @@ again:
 	return 0;
 }
 
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+{
+	maybe_lock_mutex(root);
+	set_extent_dirty(&root->fs_info->free_space_cache,
+			 start, start + len - 1, GFP_NOFS);
+	maybe_unlock_mutex(root);
+	return 0;
+}
+
 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root,
 			 u64 num_bytes, u64 min_alloc_size,
@@ -2267,6 +2276,26 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
 	maybe_unlock_mutex(root);
 	return ret;
 }
+
+struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
+					    struct btrfs_root *root,
+					    u64 bytenr, u32 blocksize)
+{
+	struct extent_buffer *buf;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+	btrfs_set_header_generation(buf, trans->transid);
+	btrfs_tree_lock(buf);
+	clean_tree_block(trans, root, buf);
+	btrfs_set_buffer_uptodate(buf);
+	set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+			 buf->start + buf->len - 1, GFP_NOFS);
+	trans->blocks_used++;
+	return buf;
+}
+
 /*
  * helper function to allocate a block for a given tree
  * returns the tree buffer or NULL.
@@ -2293,26 +2322,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 		BUG_ON(ret > 0);
 		return ERR_PTR(ret);
 	}
-	buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
-	if (!buf) {
-		btrfs_free_extent(trans, root, ins.objectid, blocksize,
-				  root->root_key.objectid, ref_generation,
-				  0, 0, 0);
-		return ERR_PTR(-ENOMEM);
-	}
-	btrfs_set_header_generation(buf, trans->transid);
-	btrfs_tree_lock(buf);
-	clean_tree_block(trans, root, buf);
-	btrfs_set_buffer_uptodate(buf);
-
-	if (PageDirty(buf->first_page)) {
-		printk("page %lu dirty\n", buf->first_page->index);
-		WARN_ON(1);
-	}
-
 
-	set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
-			 buf->start + buf->len - 1, GFP_NOFS);
-	trans->blocks_used++;
+	buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
 	return buf;
 }
 