author    Linus Torvalds <torvalds@linux-foundation.org>  2009-11-11 16:38:59 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-11-11 16:38:59 -0500
commit    aa021baa3295fa6e3f367d80f8955dd5176656eb (patch)
tree      13da8275b1957399bc4fa19ec7cc313d48694e31 /fs/btrfs
parent    404291ac9e72d118fcadeb939a69b2caa0a0e9ca (diff)
parent    a6dbd429d8dd3382bbd9594b8d2ec74843a260d9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: fix panic when trying to destroy a newly allocated
  Btrfs: allow more metadata chunk preallocation
  Btrfs: fallback on uncompressed io if compressed io fails
  Btrfs: find ideal block group for caching
  Btrfs: avoid null deref in unpin_extent_cache()
  Btrfs: skip btrfs_release_path in btrfs_update_root and btrfs_del_root
  Btrfs: fix some metadata enospc issues
  Btrfs: fix how we set max_size for free space clusters
  Btrfs: cleanup transaction starting and fix journal_info usage
  Btrfs: fix data allocation hint start
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/extent-tree.c      | 113
-rw-r--r--  fs/btrfs/extent_map.c       |   2
-rw-r--r--  fs/btrfs/free-space-cache.c |   2
-rw-r--r--  fs/btrfs/inode.c            |  95
-rw-r--r--  fs/btrfs/root-tree.c        |   2
-rw-r--r--  fs/btrfs/transaction.c      |  19
6 files changed, 183 insertions(+), 50 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e238a0cdac67..94627c4cc193 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2977,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 
 	free_space = btrfs_super_total_bytes(disk_super);
 	/*
-	 * we allow the metadata to grow to a max of either 5gb or 5% of the
+	 * we allow the metadata to grow to a max of either 10gb or 5% of the
 	 * space in the volume.
 	 */
-	min_metadata = min((u64)5 * 1024 * 1024 * 1024,
+	min_metadata = min((u64)10 * 1024 * 1024 * 1024,
 			   div64_u64(free_space * 5, 100));
 	if (info->total_bytes >= min_metadata) {
 		spin_unlock(&info->lock);
@@ -4102,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-	LOOP_CACHED_ONLY = 0,
+	LOOP_FIND_IDEAL = 0,
 	LOOP_CACHING_NOWAIT = 1,
 	LOOP_CACHING_WAIT = 2,
 	LOOP_ALLOC_CHUNK = 3,
@@ -4131,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *block_group = NULL;
 	int empty_cluster = 2 * 1024 * 1024;
 	int allowed_chunk_alloc = 0;
+	int done_chunk_alloc = 0;
 	struct btrfs_space_info *space_info;
 	int last_ptr_loop = 0;
 	int loop = 0;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
 	bool failed_alloc = false;
+	u64 ideal_cache_percent = 0;
+	u64 ideal_cache_offset = 0;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4172,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		empty_cluster = 0;
 
 	if (search_start == hint_byte) {
+ideal_cache:
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
+		 *
+		 * However if we are re-searching with an ideal block group
+		 * picked out then we don't care that the block group is cached.
 		 */
 		if (block_group && block_group_bits(block_group, data) &&
-		    block_group_cache_done(block_group)) {
+		    (block_group->cached != BTRFS_CACHE_NO ||
+		     search_start == ideal_cache_offset)) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -4191,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 				 */
 				btrfs_put_block_group(block_group);
 				up_read(&space_info->groups_sem);
-			} else
+			} else {
 				goto have_block_group;
+			}
 		} else if (block_group) {
 			btrfs_put_block_group(block_group);
 		}
 	}
-
 search:
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4209,28 +4217,45 @@ search:
 
 have_block_group:
 		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+			u64 free_percent;
+
+			free_percent = btrfs_block_group_used(&block_group->item);
+			free_percent *= 100;
+			free_percent = div64_u64(free_percent,
+						 block_group->key.offset);
+			free_percent = 100 - free_percent;
+			if (free_percent > ideal_cache_percent &&
+			    likely(!block_group->ro)) {
+				ideal_cache_offset = block_group->key.objectid;
+				ideal_cache_percent = free_percent;
+			}
+
 			/*
-			 * we want to start caching kthreads, but not too many
-			 * right off the bat so we don't overwhelm the system,
-			 * so only start them if there are less than 2 and we're
-			 * in the initial allocation phase.
+			 * We only want to start kthread caching if we are at
+			 * the point where we will wait for caching to make
+			 * progress, or if our ideal search is over and we've
+			 * found somebody to start caching.
 			 */
 			if (loop > LOOP_CACHING_NOWAIT ||
-			    atomic_read(&space_info->caching_threads) < 2) {
+			    (loop > LOOP_FIND_IDEAL &&
+			     atomic_read(&space_info->caching_threads) < 2)) {
 				ret = cache_block_group(block_group);
 				BUG_ON(ret);
 			}
-		}
-
-		cached = block_group_cache_done(block_group);
-		if (unlikely(!cached)) {
 			found_uncached_bg = true;
 
-			/* if we only want cached bgs, loop */
-			if (loop == LOOP_CACHED_ONLY)
+			/*
+			 * If loop is set for cached only, try the next block
+			 * group.
+			 */
+			if (loop == LOOP_FIND_IDEAL)
 				goto loop;
 		}
 
+		cached = block_group_cache_done(block_group);
+		if (unlikely(!cached))
+			found_uncached_bg = true;
+
 		if (unlikely(block_group->ro))
 			goto loop;
 
@@ -4410,9 +4435,11 @@ loop:
 	}
 	up_read(&space_info->groups_sem);
 
-	/* LOOP_CACHED_ONLY, only search fully cached block groups
-	 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
-	 * dont wait foR them to finish caching
+	/* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
+	 * for them to make caching progress. Also
+	 * determine the best possible bg to cache
+	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
+	 * caching kthreads as we move along
 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4421,12 +4448,47 @@ loop:
 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
 	    (found_uncached_bg || empty_size || empty_cluster ||
 	     allowed_chunk_alloc)) {
-		if (found_uncached_bg) {
+		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 			found_uncached_bg = false;
-			if (loop < LOOP_CACHING_WAIT) {
-				loop++;
+			loop++;
+			if (!ideal_cache_percent &&
+			    atomic_read(&space_info->caching_threads))
 				goto search;
-			}
+
+			/*
+			 * 1 of the following 2 things have happened so far
+			 *
+			 * 1) We found an ideal block group for caching that
+			 * is mostly full and will cache quickly, so we might
+			 * as well wait for it.
+			 *
+			 * 2) We searched for cached only and we didn't find
+			 * anything, and we didn't start any caching kthreads
+			 * either, so chances are we will loop through and
+			 * start a couple caching kthreads, and then come back
+			 * around and just wait for them.  This will be slower
+			 * because we will have 2 caching kthreads reading at
+			 * the same time when we could have just started one
+			 * and waited for it to get far enough to give us an
+			 * allocation, so go ahead and go to the wait caching
+			 * loop.
+			 */
+			loop = LOOP_CACHING_WAIT;
+			search_start = ideal_cache_offset;
+			ideal_cache_percent = 0;
+			goto ideal_cache;
+		} else if (loop == LOOP_FIND_IDEAL) {
+			/*
+			 * Didn't find a uncached bg, wait on anything we find
+			 * next.
+			 */
+			loop = LOOP_CACHING_WAIT;
+			goto search;
+		}
+
+		if (loop < LOOP_CACHING_WAIT) {
+			loop++;
+			goto search;
 		}
 
 		if (loop == LOOP_ALLOC_CHUNK) {
@@ -4438,7 +4500,8 @@ loop:
 			ret = do_chunk_alloc(trans, root, num_bytes +
 					     2 * 1024 * 1024, data, 1);
 			allowed_chunk_alloc = 0;
-		} else {
+			done_chunk_alloc = 1;
+		} else if (!done_chunk_alloc) {
 			space_info->force_alloc = 1;
 		}
 
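
A note on the arithmetic in the ideal-block-group pass above: each uncached
group is scored by its integer free percentage (100 minus used*100/size, via
div64_u64) and the best candidate is remembered in ideal_cache_offset. Below
is a minimal userspace sketch of just that calculation; the names and the
div64_u64 stand-in are illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for the kernel's div64_u64() helper */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

/* mirrors the free_percent computation added to find_free_extent() */
static uint64_t free_percent(uint64_t used, uint64_t size)
{
	uint64_t pct = used * 100;

	pct = div64_u64(pct, size);
	return 100 - pct;
}

int main(void)
{
	/* a 1 GiB block group with 256 MiB used is 75% free */
	printf("%llu%% free\n", (unsigned long long)
	       free_percent(256ULL << 20, 1ULL << 30));
	return 0;
}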
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2c726b7b9faa..ccbdcb54ec5d 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
 	write_lock(&tree->lock);
 	em = lookup_extent_mapping(tree, start, len);
 
-	WARN_ON(em->start != start || !em);
+	WARN_ON(!em || em->start != start);
 
 	if (!em)
 		goto out;
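
The one-line change above is a classic short-circuit ordering fix: in
WARN_ON(em->start != start || !em) the left operand dereferences em before
the NULL test on the right can ever run. A minimal userspace sketch of the
same hazard, with a hypothetical WARN_ON stub standing in for the kernel
macro:

#include <stdio.h>

/* hypothetical stand-in for the kernel's WARN_ON() */
#define WARN_ON(cond) \
	do { if (cond) fprintf(stderr, "warn: %s\n", #cond); } while (0)

struct extent_map { unsigned long long start; };

static void check(struct extent_map *em, unsigned long long start)
{
	/* buggy order (the removed line): em->start is read before !em
	 * is tested, so em == NULL faults before short-circuiting:
	 *	WARN_ON(em->start != start || !em);
	 */

	/* fixed order: !em is evaluated first, and || guarantees the
	 * right operand is skipped entirely when em is NULL */
	WARN_ON(!em || em->start != start);
}

int main(void)
{
	check(NULL, 4096);	/* safe only with the fixed ordering */
	return 0;
}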
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5c2caad76212..cb2849f03251 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1296,7 +1296,7 @@ again:
 			window_start = entry->offset;
 			window_free = entry->bytes;
 			last = entry;
-			max_extent = 0;
+			max_extent = entry->bytes;
 		} else {
 			last = next;
 			window_free += next->bytes;
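
For context, this function slides a window across contiguous free-space
entries while building a cluster; max_extent tracks the largest single entry
inside the window, and resetting it to 0 on a window restart meant the
cluster's max_size could be understated. A hedged, self-contained sketch of
that window idea (illustrative names and restart condition, not the kernel's
exact logic):

#include <stdio.h>

struct entry { unsigned long long offset, bytes; };

/* largest single extent in the final window of a left-to-right scan */
static unsigned long long window_max(const struct entry *e, int n)
{
	unsigned long long window_free = 0, max_extent = 0;

	for (int i = 0; i < n; i++) {
		if (i == 0 || e[i].offset != e[i - 1].offset + e[i - 1].bytes) {
			/* window restarts at e[i]: seed the tracker with
			 * this entry's size -- the fix; it used to be 0 */
			window_free = e[i].bytes;
			max_extent = e[i].bytes;
		} else {
			window_free += e[i].bytes;
			if (e[i].bytes > max_extent)
				max_extent = e[i].bytes;
		}
	}
	(void)window_free;	/* the kernel also tracks total window bytes */
	return max_extent;
}

int main(void)
{
	/* one gap, so the window restarts once; answer is 8192, not 0 */
	const struct entry e[] = { { 0, 4096 }, { 16384, 8192 } };

	printf("max extent: %llu\n", window_max(e, 2));
	return 0;
}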
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index dae12dc7e159..b3ad168a0bfc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_io_tree *io_tree;
-	int ret;
+	int ret = 0;
 
 	if (list_empty(&async_cow->extents))
 		return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 
 	io_tree = &BTRFS_I(inode)->io_tree;
 
+retry:
 	/* did the compression code fall back to uncompressed IO? */
 	if (!async_extent->pages) {
 		int page_started = 0;
@@ -562,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
 				 async_extent->ram_size - 1, GFP_NOFS);
 
 		/* allocate blocks */
-		cow_file_range(inode, async_cow->locked_page,
-			       async_extent->start,
-			       async_extent->start +
-			       async_extent->ram_size - 1,
-			       &page_started, &nr_written, 0);
+		ret = cow_file_range(inode, async_cow->locked_page,
+				     async_extent->start,
+				     async_extent->start +
+				     async_extent->ram_size - 1,
+				     &page_started, &nr_written, 0);
 
 		/*
 		 * if page_started, cow_file_range inserted an
@@ -574,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 		 * and IO for us. Otherwise, we need to submit
 		 * all those pages down to the drive.
 		 */
-		if (!page_started)
+		if (!page_started && !ret)
 			extent_write_locked_range(io_tree,
 					 inode, async_extent->start,
 					 async_extent->start +
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
 					   async_extent->compressed_size,
 					   0, alloc_hint,
 					   (u64)-1, &ins, 1);
-		BUG_ON(ret);
+		if (ret) {
+			int i;
+			for (i = 0; i < async_extent->nr_pages; i++) {
+				WARN_ON(async_extent->pages[i]->mapping);
+				page_cache_release(async_extent->pages[i]);
+			}
+			kfree(async_extent->pages);
+			async_extent->nr_pages = 0;
+			async_extent->pages = NULL;
+			unlock_extent(io_tree, async_extent->start,
+				      async_extent->start +
+				      async_extent->ram_size - 1, GFP_NOFS);
+			goto retry;
+		}
+
 		em = alloc_extent_map(GFP_NOFS);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
@@ -743,8 +758,22 @@ static noinline int cow_file_range(struct inode *inode,
 	em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
 				   start, num_bytes);
 	if (em) {
-		alloc_hint = em->block_start;
-		free_extent_map(em);
+		/*
+		 * if block start isn't an actual block number then find the
+		 * first block in this inode and use that as a hint.  If that
+		 * block is also bogus then just don't worry about it.
+		 */
+		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+			free_extent_map(em);
+			em = search_extent_mapping(em_tree, 0, 0);
+			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
+				alloc_hint = em->block_start;
+			if (em)
+				free_extent_map(em);
+		} else {
+			alloc_hint = em->block_start;
+			free_extent_map(em);
+		}
 	}
 	read_unlock(&BTRFS_I(inode)->extent_tree.lock);
 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -2474,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 
 	root = BTRFS_I(dir)->root;
 
+	/*
+	 * 5 items for unlink inode
+	 * 1 for orphan
+	 */
+	ret = btrfs_reserve_metadata_space(root, 6);
+	if (ret)
+		return ret;
+
 	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		btrfs_unreserve_metadata_space(root, 6);
+		return PTR_ERR(trans);
+	}
 
 	btrfs_set_trans_block_group(trans, dir);
 
@@ -2489,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 	nr = trans->blocks_used;
 
 	btrfs_end_transaction_throttle(trans, root);
+	btrfs_unreserve_metadata_space(root, 6);
 	btrfs_btree_balance_dirty(root, nr);
 	return ret;
 }
@@ -2569,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
 		return -ENOTEMPTY;
 
+	ret = btrfs_reserve_metadata_space(root, 5);
+	if (ret)
+		return ret;
+
 	trans = btrfs_start_transaction(root, 1);
+	if (IS_ERR(trans)) {
+		btrfs_unreserve_metadata_space(root, 5);
+		return PTR_ERR(trans);
+	}
+
 	btrfs_set_trans_block_group(trans, dir);
 
 	if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2592,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 out:
 	nr = trans->blocks_used;
 	ret = btrfs_end_transaction_throttle(trans, root);
+	btrfs_unreserve_metadata_space(root, 5);
 	btrfs_btree_balance_dirty(root, nr);
 
 	if (ret && !err)
@@ -5128,6 +5180,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->logged_trans = 0;
 	ei->outstanding_extents = 0;
 	ei->reserved_extents = 0;
+	ei->root = NULL;
 	spin_lock_init(&ei->accounting_lock);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
 	INIT_LIST_HEAD(&ei->i_orphan);
@@ -5144,6 +5197,14 @@ void btrfs_destroy_inode(struct inode *inode)
 	WARN_ON(inode->i_data.nrpages);
 
 	/*
+	 * This can happen where we create an inode, but somebody else also
+	 * created the same inode and we need to destroy the one we already
+	 * created.
+	 */
+	if (!root)
+		goto free;
+
+	/*
 	 * Make sure we're properly removed from the ordered operation
 	 * lists.
 	 */
@@ -5178,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
 	}
 	inode_tree_del(inode);
 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+free:
 	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
 
@@ -5283,11 +5345,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		return -ENOTEMPTY;
 
 	/*
-	 * 2 items for dir items
-	 * 1 item for orphan entry
-	 * 1 item for ref
+	 * We want to reserve the absolute worst case amount of items.  So if
+	 * both inodes are subvols and we need to unlink them then that would
+	 * require 4 item modifications, but if they are both normal inodes it
+	 * would require 5 item modifications, so we'll assume their normal
+	 * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
+	 * should cover the worst case number of items we'll modify.
 	 */
-	ret = btrfs_reserve_metadata_space(root, 4);
+	ret = btrfs_reserve_metadata_space(root, 11);
 	if (ret)
 		return ret;
 
@@ -5403,7 +5468,7 @@ out_fail:
 	if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&root->fs_info->subvol_sem);
 
-	btrfs_unreserve_metadata_space(root, 4);
+	btrfs_unreserve_metadata_space(root, 11);
 	return ret;
 }
 
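
Taken together, the inode.c hunks turn a hard BUG_ON into a graceful retry:
if reserving an extent for the compressed copy fails, the compressed pages
are released, pages is set to NULL, and control jumps back to the retry
label, where the now page-less extent takes the uncompressed cow_file_range
path. A hedged toy reduction of that control flow (stand-in functions, not
the kernel's):

#include <stdio.h>
#include <stdlib.h>

/* toy stand-ins: the real code reserves disk extents and submits bios */
static int reserve_extent(size_t bytes) { (void)bytes; return -1; /* ENOSPC */ }
static void write_uncompressed(void) { puts("fallback: uncompressed io"); }

static void submit(unsigned char *compressed, size_t clen)
{
retry:
	if (!compressed) {
		/* no compressed copy (anymore): plain write path */
		write_uncompressed();
		return;
	}
	if (reserve_extent(clen)) {
		/* mirror the new error branch: drop the compressed
		 * pages and re-enter as the uncompressed case */
		free(compressed);
		compressed = NULL;
		goto retry;
	}
	puts("writing compressed extent");
}

int main(void)
{
	submit(malloc(4096), 4096);
	return 0;
}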
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 9351428f30e2..67fa2d29d663 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 	write_extent_buffer(l, item, ptr, sizeof(*item));
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 out:
-	btrfs_release_path(root, path);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	BUG_ON(refs != 0);
 	ret = btrfs_del_item(trans, root, path);
 out:
-	btrfs_release_path(root, path);
 	btrfs_free_path(path);
 	return ret;
 }
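
Dropping the two btrfs_release_path() calls is safe because btrfs_free_path()
already releases the path before freeing it, so the explicit call directly
ahead of the free was pure redundancy. A self-contained toy showing that
shape (illustrative types, paraphrasing the helpers rather than copying
ctree.c):

#include <stdio.h>

/* toy type standing in for struct btrfs_path */
struct path { int held_refs; };

/* simplified shape of btrfs_release_path(): drop held references */
static void release_path(struct path *p)
{
	p->held_refs = 0;
	puts("path released");
}

/* simplified shape of btrfs_free_path(): releasing is implied by freeing,
 * which is why the explicit release before it could be deleted */
static void free_path(struct path *p)
{
	release_path(p);
	puts("path freed");
}

int main(void)
{
	struct path p = { .held_refs = 2 };

	free_path(&p);		/* no separate release_path(&p) needed */
	return 0;
}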
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index bca82a4ca8e6..c207e8c32c9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -163,8 +163,14 @@ static void wait_current_trans(struct btrfs_root *root)
 	}
 }
 
+enum btrfs_trans_type {
+	TRANS_START,
+	TRANS_JOIN,
+	TRANS_USERSPACE,
+};
+
 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
-						    int num_blocks, int wait)
+						    int num_blocks, int type)
 {
 	struct btrfs_trans_handle *h =
 		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -172,7 +178,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 
 	mutex_lock(&root->fs_info->trans_mutex);
 	if (!root->fs_info->log_root_recovering &&
-	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
+	    ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
+	     type == TRANS_USERSPACE))
 		wait_current_trans(root);
 	ret = join_transaction(root);
 	BUG_ON(ret);
@@ -186,7 +193,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 	h->alloc_exclude_start = 0;
 	h->delayed_ref_updates = 0;
 
-	if (!current->journal_info)
+	if (!current->journal_info && type != TRANS_USERSPACE)
 		current->journal_info = h;
 
 	root->fs_info->running_transaction->use_count++;
@@ -198,18 +205,18 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
 						   int num_blocks)
 {
-	return start_transaction(root, num_blocks, 1);
+	return start_transaction(root, num_blocks, TRANS_START);
 }
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
 						  int num_blocks)
 {
-	return start_transaction(root, num_blocks, 0);
+	return start_transaction(root, num_blocks, TRANS_JOIN);
 }
 
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
 						  int num_blocks)
 {
-	return start_transaction(r, num_blocks, 2);
+	return start_transaction(r, num_blocks, TRANS_USERSPACE);
 }
 
 /* wait for a transaction commit to be fully complete */
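
Two things happen in this last set of hunks: the magic wait values 0/1/2
become a named enum, and TRANS_USERSPACE transactions no longer stash their
handle in current->journal_info, since an ioctl-opened transaction can
outlive the syscall that created it. A hedged reduction of the resulting
wait decision (toy flags, not real kernel state):

#include <stdio.h>
#include <stdbool.h>

enum btrfs_trans_type { TRANS_START, TRANS_JOIN, TRANS_USERSPACE };

/* mirrors the condition in start_transaction(): TRANS_JOIN never waits,
 * TRANS_START waits unless an ioctl-driven transaction is open, and
 * TRANS_USERSPACE always waits (log-root recovery aside) */
static bool should_wait(enum btrfs_trans_type type, bool open_ioctl_trans)
{
	return (type == TRANS_START && !open_ioctl_trans) ||
	       type == TRANS_USERSPACE;
}

int main(void)
{
	printf("start, no ioctl: %d\n", should_wait(TRANS_START, false));
	printf("start, ioctl:    %d\n", should_wait(TRANS_START, true));
	printf("join:            %d\n", should_wait(TRANS_JOIN, false));
	printf("userspace:       %d\n", should_wait(TRANS_USERSPACE, true));
	return 0;
}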