author     Li Zefan <lizf@cn.fujitsu.com>   2011-04-19 22:06:11 -0400
committer  Li Zefan <lizf@cn.fujitsu.com>   2011-04-25 04:46:04 -0400
commit     581bb050941b4f220f84d3e5ed6dace3d42dd382 (patch)
tree       5ebd56af5eb3612f508419b188dfc18e959e7c94
parent     34d52cb6c50b5a43901709998f59fb1c5a43dc4a (diff)
Btrfs: Cache free inode numbers in memory
Currently btrfs stores only the highest objectid of the fs tree, and it always
returns (highest+1) as the inode number when we create a file, so inode numbers
are never reclaimed when we delete files, and we'll eventually run out of inode
numbers as we keep creating and deleting files on 32-bit machines.
This fixes it, and it works similarly to how we cache free space in block
groups.
We start a kernel thread to read the fs tree. By scanning inode items,
we learn which chunks of inode numbers are free, and we cache them in
an rb-tree.
Because we are searching the commit root, we have to carefully handle the
cross-transaction case.
The rb-tree is a hybrid extent+bitmap tree, so if we have too many small
chunks of inode numbers, we'll use bitmaps. Initially we allow 16K of RAM
for extents, and a bitmap will be used once we exceed this threshold. The
extents threshold is adjusted at runtime.
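
As a stand-alone illustration of the scanning step (this is not code from the
patch; the helper name add_free_range and the sample objectids are made up),
the idea can be modeled in user space: walk the in-use objectids in order and
record every gap between them as a free range, plus everything above the
highest one:

```c
/*
 * Stand-alone model (not kernel code): derive free inode-number ranges
 * from a sorted list of in-use objectids, the same way the caching
 * thread derives them from the gaps between INODE_ITEM keys.
 */
#include <stdio.h>
#include <stdint.h>

#define FIRST_FREE_OBJECTID 256ULL
#define LAST_FREE_OBJECTID  ((uint64_t)-256)

static void add_free_range(uint64_t start, uint64_t len)
{
	/* The kernel caches this range in an rb-tree (extent or bitmap). */
	printf("free range: start=%llu len=%llu\n",
	       (unsigned long long)start, (unsigned long long)len);
}

int main(void)
{
	/* Pretend these objectids were found while scanning inode items. */
	uint64_t in_use[] = { 256, 257, 260, 300 };
	size_t n = sizeof(in_use) / sizeof(in_use[0]);
	uint64_t last = (uint64_t)-1;

	for (size_t i = 0; i < n; i++) {
		if (last != (uint64_t)-1 && last + 1 != in_use[i])
			add_free_range(last + 1, in_use[i] - last - 1);
		last = in_use[i];
	}
	/* Everything above the highest in-use objectid is free as well. */
	if (last < LAST_FREE_OBJECTID - 1)
		add_free_range(last + 1, LAST_FREE_OBJECTID - last - 1);
	return 0;
}
```

In the patch itself this is what caching_kthread() does with
__btrfs_add_free_space() while walking INODE_ITEM keys in the commit root.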
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
-rw-r--r--  fs/btrfs/ctree.h            |  15
-rw-r--r--  fs/btrfs/disk-io.c          |  18
-rw-r--r--  fs/btrfs/free-space-cache.c |  96
-rw-r--r--  fs/btrfs/free-space-cache.h |  16
-rw-r--r--  fs/btrfs/inode-map.c        | 341
-rw-r--r--  fs/btrfs/inode-map.h        |  11
-rw-r--r--  fs/btrfs/inode.c            |  42
-rw-r--r--  fs/btrfs/ioctl.c            |   4
-rw-r--r--  fs/btrfs/relocation.c       |   3
-rw-r--r--  fs/btrfs/transaction.c      |   7
10 files changed, 500 insertions(+), 53 deletions(-)
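
For a rough feel of the extent-vs-bitmap trade-off described in the commit
message, here is a stand-alone sketch; only the constants mirror the patch
(16K of RAM for extent entries initially, PAGE_SIZE * 8 inode numbers per
bitmap), while the struct and helper names are illustrative:

```c
/*
 * Rough stand-alone model of the extent-vs-bitmap decision.  Not kernel
 * code: free_space_entry, use_bitmap() and extents_thresh are made-up
 * names standing in for the structures used by the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define INODES_PER_BITMAP  (PAGE_SIZE * 8)

struct free_space_entry {
	unsigned long long offset;
	unsigned long long bytes;	/* number of free inode numbers */
};

/* Initially allow 16K of RAM worth of extent entries. */
static unsigned long extents_thresh =
	((1024 * 32) / 2) / sizeof(struct free_space_entry);

static bool use_bitmap(unsigned long free_extents,
		       const struct free_space_entry *info)
{
	/*
	 * Stay with extents while we are under the threshold, or when the
	 * chunk of inode numbers is large enough that an extent is clearly
	 * cheaper than a bitmap.
	 */
	if (free_extents < extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;
	return true;
}

int main(void)
{
	struct free_space_entry small = { .offset = 1000, .bytes = 3 };
	struct free_space_entry big   = { .offset = 5000, .bytes = 100000 };

	printf("small chunk, many extents -> bitmap? %d\n",
	       use_bitmap(10000, &small));	/* small chunks go to bitmaps */
	printf("big chunk,   many extents -> bitmap? %d\n",
	       use_bitmap(10000, &big));	/* big chunks stay as extents */
	return 0;
}
```

The real decision lives in use_bitmap() and recalculate_thresholds() in
fs/btrfs/inode-map.c below.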
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d17e4a3b8bf7..c96a4e4c5566 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -1102,6 +1102,15 @@ struct btrfs_root { | |||
1102 | spinlock_t accounting_lock; | 1102 | spinlock_t accounting_lock; |
1103 | struct btrfs_block_rsv *block_rsv; | 1103 | struct btrfs_block_rsv *block_rsv; |
1104 | 1104 | ||
1105 | /* free ino cache stuff */ | ||
1106 | struct mutex fs_commit_mutex; | ||
1107 | struct btrfs_free_space_ctl *free_ino_ctl; | ||
1108 | enum btrfs_caching_type cached; | ||
1109 | spinlock_t cache_lock; | ||
1110 | wait_queue_head_t cache_wait; | ||
1111 | struct btrfs_free_space_ctl *free_ino_pinned; | ||
1112 | u64 cache_progress; | ||
1113 | |||
1105 | struct mutex log_mutex; | 1114 | struct mutex log_mutex; |
1106 | wait_queue_head_t log_writer_wait; | 1115 | wait_queue_head_t log_writer_wait; |
1107 | wait_queue_head_t log_commit_wait[2]; | 1116 | wait_queue_head_t log_commit_wait[2]; |
@@ -2408,12 +2417,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, | |||
2408 | struct btrfs_root *root, u64 offset); | 2417 | struct btrfs_root *root, u64 offset); |
2409 | int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); | 2418 | int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); |
2410 | 2419 | ||
2411 | /* inode-map.c */ | ||
2412 | int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, | ||
2413 | struct btrfs_root *fs_root, | ||
2414 | u64 dirid, u64 *objectid); | ||
2415 | int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid); | ||
2416 | |||
2417 | /* inode-item.c */ | 2420 | /* inode-item.c */ |
2418 | int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, | 2421 | int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, |
2419 | struct btrfs_root *root, | 2422 | struct btrfs_root *root, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ef6865c17cd6..d02683b1ee16 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include "locking.h" | 41 | #include "locking.h" |
42 | #include "tree-log.h" | 42 | #include "tree-log.h" |
43 | #include "free-space-cache.h" | 43 | #include "free-space-cache.h" |
44 | #include "inode-map.h" | ||
44 | 45 | ||
45 | static struct extent_io_ops btree_extent_io_ops; | 46 | static struct extent_io_ops btree_extent_io_ops; |
46 | static void end_workqueue_fn(struct btrfs_work *work); | 47 | static void end_workqueue_fn(struct btrfs_work *work); |
@@ -1327,6 +1328,19 @@ again: | |||
1327 | if (IS_ERR(root)) | 1328 | if (IS_ERR(root)) |
1328 | return root; | 1329 | return root; |
1329 | 1330 | ||
1331 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); | ||
1332 | if (!root->free_ino_ctl) | ||
1333 | goto fail; | ||
1334 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), | ||
1335 | GFP_NOFS); | ||
1336 | if (!root->free_ino_pinned) | ||
1337 | goto fail; | ||
1338 | |||
1339 | btrfs_init_free_ino_ctl(root); | ||
1340 | mutex_init(&root->fs_commit_mutex); | ||
1341 | spin_lock_init(&root->cache_lock); | ||
1342 | init_waitqueue_head(&root->cache_wait); | ||
1343 | |||
1330 | set_anon_super(&root->anon_super, NULL); | 1344 | set_anon_super(&root->anon_super, NULL); |
1331 | 1345 | ||
1332 | if (btrfs_root_refs(&root->root_item) == 0) { | 1346 | if (btrfs_root_refs(&root->root_item) == 0) { |
@@ -2483,6 +2497,8 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | |||
2483 | if (btrfs_root_refs(&root->root_item) == 0) | 2497 | if (btrfs_root_refs(&root->root_item) == 0) |
2484 | synchronize_srcu(&fs_info->subvol_srcu); | 2498 | synchronize_srcu(&fs_info->subvol_srcu); |
2485 | 2499 | ||
2500 | __btrfs_remove_free_space_cache(root->free_ino_pinned); | ||
2501 | __btrfs_remove_free_space_cache(root->free_ino_ctl); | ||
2486 | free_fs_root(root); | 2502 | free_fs_root(root); |
2487 | return 0; | 2503 | return 0; |
2488 | } | 2504 | } |
@@ -2496,6 +2512,8 @@ static void free_fs_root(struct btrfs_root *root) | |||
2496 | } | 2512 | } |
2497 | free_extent_buffer(root->node); | 2513 | free_extent_buffer(root->node); |
2498 | free_extent_buffer(root->commit_root); | 2514 | free_extent_buffer(root->commit_root); |
2515 | kfree(root->free_ino_ctl); | ||
2516 | kfree(root->free_ino_pinned); | ||
2499 | kfree(root->name); | 2517 | kfree(root->name); |
2500 | kfree(root); | 2518 | kfree(root); |
2501 | } | 2519 | } |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d4fb4f077a79..2ce89bfc8815 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "transaction.h" | 25 | #include "transaction.h" |
26 | #include "disk-io.h" | 26 | #include "disk-io.h" |
27 | #include "extent_io.h" | 27 | #include "extent_io.h" |
28 | #include "inode-map.h" | ||
28 | 29 | ||
29 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) | 30 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) |
30 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) | 31 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) |
@@ -105,7 +106,7 @@ int create_free_space_inode(struct btrfs_root *root, | |||
105 | u64 objectid; | 106 | u64 objectid; |
106 | int ret; | 107 | int ret; |
107 | 108 | ||
108 | ret = btrfs_find_free_objectid(trans, root, 0, &objectid); | 109 | ret = btrfs_find_free_objectid(root, &objectid); |
109 | if (ret < 0) | 110 | if (ret < 0) |
110 | return ret; | 111 | return ret; |
111 | 112 | ||
@@ -1496,10 +1497,9 @@ bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, | |||
1496 | return merged; | 1497 | return merged; |
1497 | } | 1498 | } |
1498 | 1499 | ||
1499 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 1500 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, |
1500 | u64 offset, u64 bytes) | 1501 | u64 offset, u64 bytes) |
1501 | { | 1502 | { |
1502 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1503 | struct btrfs_free_space *info; | 1503 | struct btrfs_free_space *info; |
1504 | int ret = 0; | 1504 | int ret = 0; |
1505 | 1505 | ||
@@ -1751,11 +1751,29 @@ out: | |||
1751 | return 0; | 1751 | return 0; |
1752 | } | 1752 | } |
1753 | 1753 | ||
1754 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | 1754 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) |
1755 | { | 1755 | { |
1756 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1757 | struct btrfs_free_space *info; | 1756 | struct btrfs_free_space *info; |
1758 | struct rb_node *node; | 1757 | struct rb_node *node; |
1758 | |||
1759 | spin_lock(&ctl->tree_lock); | ||
1760 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { | ||
1761 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1762 | unlink_free_space(ctl, info); | ||
1763 | kfree(info->bitmap); | ||
1764 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1765 | if (need_resched()) { | ||
1766 | spin_unlock(&ctl->tree_lock); | ||
1767 | cond_resched(); | ||
1768 | spin_lock(&ctl->tree_lock); | ||
1769 | } | ||
1770 | } | ||
1771 | spin_unlock(&ctl->tree_lock); | ||
1772 | } | ||
1773 | |||
1774 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | ||
1775 | { | ||
1776 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1759 | struct btrfs_free_cluster *cluster; | 1777 | struct btrfs_free_cluster *cluster; |
1760 | struct list_head *head; | 1778 | struct list_head *head; |
1761 | 1779 | ||
@@ -1773,21 +1791,9 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
1773 | spin_lock(&ctl->tree_lock); | 1791 | spin_lock(&ctl->tree_lock); |
1774 | } | 1792 | } |
1775 | } | 1793 | } |
1776 | |||
1777 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { | ||
1778 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1779 | unlink_free_space(ctl, info); | ||
1780 | if (info->bitmap) | ||
1781 | kfree(info->bitmap); | ||
1782 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1783 | if (need_resched()) { | ||
1784 | spin_unlock(&ctl->tree_lock); | ||
1785 | cond_resched(); | ||
1786 | spin_lock(&ctl->tree_lock); | ||
1787 | } | ||
1788 | } | ||
1789 | |||
1790 | spin_unlock(&ctl->tree_lock); | 1794 | spin_unlock(&ctl->tree_lock); |
1795 | |||
1796 | __btrfs_remove_free_space_cache(ctl); | ||
1791 | } | 1797 | } |
1792 | 1798 | ||
1793 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 1799 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
@@ -2352,3 +2358,53 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2352 | 2358 | ||
2353 | return ret; | 2359 | return ret; |
2354 | } | 2360 | } |
2361 | |||
2362 | /* | ||
2363 | * Find the left-most item in the cache tree, and then return the | ||
2364 | * smallest inode number in the item. | ||
2365 | * | ||
2366 | * Note: the returned inode number may not be the smallest one in | ||
2367 | * the tree, if the left-most item is a bitmap. | ||
2368 | */ | ||
2369 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) | ||
2370 | { | ||
2371 | struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; | ||
2372 | struct btrfs_free_space *entry = NULL; | ||
2373 | u64 ino = 0; | ||
2374 | |||
2375 | spin_lock(&ctl->tree_lock); | ||
2376 | |||
2377 | if (RB_EMPTY_ROOT(&ctl->free_space_offset)) | ||
2378 | goto out; | ||
2379 | |||
2380 | entry = rb_entry(rb_first(&ctl->free_space_offset), | ||
2381 | struct btrfs_free_space, offset_index); | ||
2382 | |||
2383 | if (!entry->bitmap) { | ||
2384 | ino = entry->offset; | ||
2385 | |||
2386 | unlink_free_space(ctl, entry); | ||
2387 | entry->offset++; | ||
2388 | entry->bytes--; | ||
2389 | if (!entry->bytes) | ||
2390 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2391 | else | ||
2392 | link_free_space(ctl, entry); | ||
2393 | } else { | ||
2394 | u64 offset = 0; | ||
2395 | u64 count = 1; | ||
2396 | int ret; | ||
2397 | |||
2398 | ret = search_bitmap(ctl, entry, &offset, &count); | ||
2399 | BUG_ON(ret); | ||
2400 | |||
2401 | ino = offset; | ||
2402 | bitmap_clear_bits(ctl, entry, offset, 1); | ||
2403 | if (entry->bytes == 0) | ||
2404 | free_bitmap(ctl, entry); | ||
2405 | } | ||
2406 | out: | ||
2407 | spin_unlock(&ctl->tree_lock); | ||
2408 | |||
2409 | return ino; | ||
2410 | } | ||
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index a64a23fae1eb..af06e6b6ceaa 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -64,15 +64,25 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
64 | struct btrfs_trans_handle *trans, | 64 | struct btrfs_trans_handle *trans, |
65 | struct btrfs_block_group_cache *block_group, | 65 | struct btrfs_block_group_cache *block_group, |
66 | struct btrfs_path *path); | 66 | struct btrfs_path *path); |
67 | |||
67 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); | 68 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); |
68 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 69 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, |
69 | u64 bytenr, u64 size); | 70 | u64 bytenr, u64 size); |
71 | static inline int | ||
72 | btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | ||
73 | u64 bytenr, u64 size) | ||
74 | { | ||
75 | return __btrfs_add_free_space(block_group->free_space_ctl, | ||
76 | bytenr, size); | ||
77 | } | ||
70 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 78 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
71 | u64 bytenr, u64 size); | 79 | u64 bytenr, u64 size); |
80 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); | ||
72 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache | 81 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache |
73 | *block_group); | 82 | *block_group); |
74 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 83 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
75 | u64 offset, u64 bytes, u64 empty_size); | 84 | u64 offset, u64 bytes, u64 empty_size); |
85 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); | ||
76 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | 86 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
77 | u64 bytes); | 87 | u64 bytes); |
78 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | 88 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, |
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c05a08f4c411..5be62df90c4f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c | |||
@@ -16,11 +16,343 @@ | |||
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/kthread.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | |||
19 | #include "ctree.h" | 23 | #include "ctree.h" |
20 | #include "disk-io.h" | 24 | #include "disk-io.h" |
25 | #include "free-space-cache.h" | ||
26 | #include "inode-map.h" | ||
21 | #include "transaction.h" | 27 | #include "transaction.h" |
22 | 28 | ||
23 | int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) | 29 | static int caching_kthread(void *data) |
30 | { | ||
31 | struct btrfs_root *root = data; | ||
32 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
33 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
34 | struct btrfs_key key; | ||
35 | struct btrfs_path *path; | ||
36 | struct extent_buffer *leaf; | ||
37 | u64 last = (u64)-1; | ||
38 | int slot; | ||
39 | int ret; | ||
40 | |||
41 | path = btrfs_alloc_path(); | ||
42 | if (!path) | ||
43 | return -ENOMEM; | ||
44 | |||
45 | /* Since the commit root is read-only, we can safely skip locking. */ | ||
46 | path->skip_locking = 1; | ||
47 | path->search_commit_root = 1; | ||
48 | path->reada = 2; | ||
49 | |||
50 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | ||
51 | key.offset = 0; | ||
52 | key.type = BTRFS_INODE_ITEM_KEY; | ||
53 | again: | ||
54 | /* need to make sure the commit_root doesn't disappear */ | ||
55 | mutex_lock(&root->fs_commit_mutex); | ||
56 | |||
57 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
58 | if (ret < 0) | ||
59 | goto out; | ||
60 | |||
61 | while (1) { | ||
62 | smp_mb(); | ||
63 | if (fs_info->closing > 1) | ||
64 | goto out; | ||
65 | |||
66 | leaf = path->nodes[0]; | ||
67 | slot = path->slots[0]; | ||
68 | if (path->slots[0] >= btrfs_header_nritems(leaf)) { | ||
69 | ret = btrfs_next_leaf(root, path); | ||
70 | if (ret < 0) | ||
71 | goto out; | ||
72 | else if (ret > 0) | ||
73 | break; | ||
74 | |||
75 | if (need_resched() || | ||
76 | btrfs_transaction_in_commit(fs_info)) { | ||
77 | leaf = path->nodes[0]; | ||
78 | |||
79 | if (btrfs_header_nritems(leaf) == 0) { | ||
80 | WARN_ON(1); | ||
81 | break; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Save the key so we can advances forward | ||
86 | * in the next search. | ||
87 | */ | ||
88 | btrfs_item_key_to_cpu(leaf, &key, 0); | ||
89 | btrfs_release_path(root, path); | ||
90 | root->cache_progress = last; | ||
91 | mutex_unlock(&root->fs_commit_mutex); | ||
92 | schedule_timeout(1); | ||
93 | goto again; | ||
94 | } else | ||
95 | continue; | ||
96 | } | ||
97 | |||
98 | btrfs_item_key_to_cpu(leaf, &key, slot); | ||
99 | |||
100 | if (key.type != BTRFS_INODE_ITEM_KEY) | ||
101 | goto next; | ||
102 | |||
103 | if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) | ||
104 | break; | ||
105 | |||
106 | if (last != (u64)-1 && last + 1 != key.objectid) { | ||
107 | __btrfs_add_free_space(ctl, last + 1, | ||
108 | key.objectid - last - 1); | ||
109 | wake_up(&root->cache_wait); | ||
110 | } | ||
111 | |||
112 | last = key.objectid; | ||
113 | next: | ||
114 | path->slots[0]++; | ||
115 | } | ||
116 | |||
117 | if (last < BTRFS_LAST_FREE_OBJECTID - 1) { | ||
118 | __btrfs_add_free_space(ctl, last + 1, | ||
119 | BTRFS_LAST_FREE_OBJECTID - last - 1); | ||
120 | } | ||
121 | |||
122 | spin_lock(&root->cache_lock); | ||
123 | root->cached = BTRFS_CACHE_FINISHED; | ||
124 | spin_unlock(&root->cache_lock); | ||
125 | |||
126 | root->cache_progress = (u64)-1; | ||
127 | btrfs_unpin_free_ino(root); | ||
128 | out: | ||
129 | wake_up(&root->cache_wait); | ||
130 | mutex_unlock(&root->fs_commit_mutex); | ||
131 | |||
132 | btrfs_free_path(path); | ||
133 | |||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | static void start_caching(struct btrfs_root *root) | ||
138 | { | ||
139 | struct task_struct *tsk; | ||
140 | |||
141 | spin_lock(&root->cache_lock); | ||
142 | if (root->cached != BTRFS_CACHE_NO) { | ||
143 | spin_unlock(&root->cache_lock); | ||
144 | return; | ||
145 | } | ||
146 | |||
147 | root->cached = BTRFS_CACHE_STARTED; | ||
148 | spin_unlock(&root->cache_lock); | ||
149 | |||
150 | tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", | ||
151 | root->root_key.objectid); | ||
152 | BUG_ON(IS_ERR(tsk)); | ||
153 | } | ||
154 | |||
155 | int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) | ||
156 | { | ||
157 | again: | ||
158 | *objectid = btrfs_find_ino_for_alloc(root); | ||
159 | |||
160 | if (*objectid != 0) | ||
161 | return 0; | ||
162 | |||
163 | start_caching(root); | ||
164 | |||
165 | wait_event(root->cache_wait, | ||
166 | root->cached == BTRFS_CACHE_FINISHED || | ||
167 | root->free_ino_ctl->free_space > 0); | ||
168 | |||
169 | if (root->cached == BTRFS_CACHE_FINISHED && | ||
170 | root->free_ino_ctl->free_space == 0) | ||
171 | return -ENOSPC; | ||
172 | else | ||
173 | goto again; | ||
174 | } | ||
175 | |||
176 | void btrfs_return_ino(struct btrfs_root *root, u64 objectid) | ||
177 | { | ||
178 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
179 | struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; | ||
180 | again: | ||
181 | if (root->cached == BTRFS_CACHE_FINISHED) { | ||
182 | __btrfs_add_free_space(ctl, objectid, 1); | ||
183 | } else { | ||
184 | /* | ||
185 | * If we are in the process of caching free ino chunks, | ||
186 | * to avoid adding the same inode number to the free_ino | ||
187 | * tree twice due to cross transaction, we'll leave it | ||
188 | * in the pinned tree until a transaction is committed | ||
189 | * or the caching work is done. | ||
190 | */ | ||
191 | |||
192 | mutex_lock(&root->fs_commit_mutex); | ||
193 | spin_lock(&root->cache_lock); | ||
194 | if (root->cached == BTRFS_CACHE_FINISHED) { | ||
195 | spin_unlock(&root->cache_lock); | ||
196 | mutex_unlock(&root->fs_commit_mutex); | ||
197 | goto again; | ||
198 | } | ||
199 | spin_unlock(&root->cache_lock); | ||
200 | |||
201 | start_caching(root); | ||
202 | |||
203 | if (objectid <= root->cache_progress) | ||
204 | __btrfs_add_free_space(ctl, objectid, 1); | ||
205 | else | ||
206 | __btrfs_add_free_space(pinned, objectid, 1); | ||
207 | |||
208 | mutex_unlock(&root->fs_commit_mutex); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * When a transaction is committed, we'll move those inode numbers which | ||
214 | * are smaller than root->cache_progress from pinned tree to free_ino tree, | ||
215 | * and others will just be dropped, because the commit root we were | ||
216 | * searching has changed. | ||
217 | * | ||
218 | * Must be called with root->fs_commit_mutex held | ||
219 | */ | ||
220 | void btrfs_unpin_free_ino(struct btrfs_root *root) | ||
221 | { | ||
222 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
223 | struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; | ||
224 | struct btrfs_free_space *info; | ||
225 | struct rb_node *n; | ||
226 | u64 count; | ||
227 | |||
228 | while (1) { | ||
229 | n = rb_first(rbroot); | ||
230 | if (!n) | ||
231 | break; | ||
232 | |||
233 | info = rb_entry(n, struct btrfs_free_space, offset_index); | ||
234 | BUG_ON(info->bitmap); | ||
235 | |||
236 | if (info->offset > root->cache_progress) | ||
237 | goto free; | ||
238 | else if (info->offset + info->bytes > root->cache_progress) | ||
239 | count = root->cache_progress - info->offset + 1; | ||
240 | else | ||
241 | count = info->bytes; | ||
242 | |||
243 | __btrfs_add_free_space(ctl, info->offset, count); | ||
244 | free: | ||
245 | rb_erase(&info->offset_index, rbroot); | ||
246 | kfree(info); | ||
247 | } | ||
248 | } | ||
249 | |||
250 | #define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) | ||
251 | #define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) | ||
252 | |||
253 | /* | ||
254 | * The goal is to keep the memory used by the free_ino tree won't | ||
255 | * exceed the memory if we use bitmaps only. | ||
256 | */ | ||
257 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | ||
258 | { | ||
259 | struct btrfs_free_space *info; | ||
260 | struct rb_node *n; | ||
261 | int max_ino; | ||
262 | int max_bitmaps; | ||
263 | |||
264 | n = rb_last(&ctl->free_space_offset); | ||
265 | if (!n) { | ||
266 | ctl->extents_thresh = INIT_THRESHOLD; | ||
267 | return; | ||
268 | } | ||
269 | info = rb_entry(n, struct btrfs_free_space, offset_index); | ||
270 | |||
271 | /* | ||
272 | * Find the maximum inode number in the filesystem. Note we | ||
273 | * ignore the fact that this can be a bitmap, because we are | ||
274 | * not doing precise calculation. | ||
275 | */ | ||
276 | max_ino = info->bytes - 1; | ||
277 | |||
278 | max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP; | ||
279 | if (max_bitmaps <= ctl->total_bitmaps) { | ||
280 | ctl->extents_thresh = 0; | ||
281 | return; | ||
282 | } | ||
283 | |||
284 | ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * | ||
285 | PAGE_CACHE_SIZE / sizeof(*info); | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * We don't fall back to bitmap, if we are below the extents threshold | ||
290 | * or this chunk of inode numbers is a big one. | ||
291 | */ | ||
292 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, | ||
293 | struct btrfs_free_space *info) | ||
294 | { | ||
295 | if (ctl->free_extents < ctl->extents_thresh || | ||
296 | info->bytes > INODES_PER_BITMAP / 10) | ||
297 | return false; | ||
298 | |||
299 | return true; | ||
300 | } | ||
301 | |||
302 | static struct btrfs_free_space_op free_ino_op = { | ||
303 | .recalc_thresholds = recalculate_thresholds, | ||
304 | .use_bitmap = use_bitmap, | ||
305 | }; | ||
306 | |||
307 | static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl) | ||
308 | { | ||
309 | } | ||
310 | |||
311 | static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl, | ||
312 | struct btrfs_free_space *info) | ||
313 | { | ||
314 | /* | ||
315 | * We always use extents for two reasons: | ||
316 | * | ||
317 | * - The pinned tree is only used during the process of caching | ||
318 | * work. | ||
319 | * - Make code simpler. See btrfs_unpin_free_ino(). | ||
320 | */ | ||
321 | return false; | ||
322 | } | ||
323 | |||
324 | static struct btrfs_free_space_op pinned_free_ino_op = { | ||
325 | .recalc_thresholds = pinned_recalc_thresholds, | ||
326 | .use_bitmap = pinned_use_bitmap, | ||
327 | }; | ||
328 | |||
329 | void btrfs_init_free_ino_ctl(struct btrfs_root *root) | ||
330 | { | ||
331 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
332 | struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; | ||
333 | |||
334 | spin_lock_init(&ctl->tree_lock); | ||
335 | ctl->unit = 1; | ||
336 | ctl->start = 0; | ||
337 | ctl->private = NULL; | ||
338 | ctl->op = &free_ino_op; | ||
339 | |||
340 | /* | ||
341 | * Initially we allow to use 16K of ram to cache chunks of | ||
342 | * inode numbers before we resort to bitmaps. This is somewhat | ||
343 | * arbitrary, but it will be adjusted in runtime. | ||
344 | */ | ||
345 | ctl->extents_thresh = INIT_THRESHOLD; | ||
346 | |||
347 | spin_lock_init(&pinned->tree_lock); | ||
348 | pinned->unit = 1; | ||
349 | pinned->start = 0; | ||
350 | pinned->private = NULL; | ||
351 | pinned->extents_thresh = 0; | ||
352 | pinned->op = &pinned_free_ino_op; | ||
353 | } | ||
354 | |||
355 | static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) | ||
24 | { | 356 | { |
25 | struct btrfs_path *path; | 357 | struct btrfs_path *path; |
26 | int ret; | 358 | int ret; |
@@ -55,15 +387,14 @@ error: | |||
55 | return ret; | 387 | return ret; |
56 | } | 388 | } |
57 | 389 | ||
58 | int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, | 390 | int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) |
59 | struct btrfs_root *root, | ||
60 | u64 dirid, u64 *objectid) | ||
61 | { | 391 | { |
62 | int ret; | 392 | int ret; |
63 | mutex_lock(&root->objectid_mutex); | 393 | mutex_lock(&root->objectid_mutex); |
64 | 394 | ||
65 | if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { | 395 | if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { |
66 | ret = btrfs_find_highest_inode(root, &root->highest_objectid); | 396 | ret = btrfs_find_highest_objectid(root, |
397 | &root->highest_objectid); | ||
67 | if (ret) | 398 | if (ret) |
68 | goto out; | 399 | goto out; |
69 | } | 400 | } |
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h new file mode 100644 index 000000000000..eb918451b492 --- /dev/null +++ b/fs/btrfs/inode-map.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef __BTRFS_INODE_MAP | ||
2 | #define __BTRFS_INODE_MAP | ||
3 | |||
4 | void btrfs_init_free_ino_ctl(struct btrfs_root *root); | ||
5 | void btrfs_unpin_free_ino(struct btrfs_root *root); | ||
6 | void btrfs_return_ino(struct btrfs_root *root, u64 objectid); | ||
7 | int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); | ||
8 | |||
9 | int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); | ||
10 | |||
11 | #endif | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a4157cfdd533..77dd0a776c83 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include "compression.h" | 51 | #include "compression.h" |
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "free-space-cache.h" | 53 | #include "free-space-cache.h" |
54 | #include "inode-map.h" | ||
54 | 55 | ||
55 | struct btrfs_iget_args { | 56 | struct btrfs_iget_args { |
56 | u64 ino; | 57 | u64 ino; |
@@ -3809,6 +3810,10 @@ void btrfs_evict_inode(struct inode *inode) | |||
3809 | BUG_ON(ret); | 3810 | BUG_ON(ret); |
3810 | } | 3811 | } |
3811 | 3812 | ||
3813 | if (!(root == root->fs_info->tree_root || | ||
3814 | root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) | ||
3815 | btrfs_return_ino(root, inode->i_ino); | ||
3816 | |||
3812 | nr = trans->blocks_used; | 3817 | nr = trans->blocks_used; |
3813 | btrfs_end_transaction(trans, root); | 3818 | btrfs_end_transaction(trans, root); |
3814 | btrfs_btree_balance_dirty(root, nr); | 3819 | btrfs_btree_balance_dirty(root, nr); |
@@ -4538,6 +4543,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4538 | return ERR_PTR(-ENOMEM); | 4543 | return ERR_PTR(-ENOMEM); |
4539 | } | 4544 | } |
4540 | 4545 | ||
4546 | /* | ||
4547 | * we have to initialize this early, so we can reclaim the inode | ||
4548 | * number if we fail afterwards in this function. | ||
4549 | */ | ||
4550 | inode->i_ino = objectid; | ||
4551 | |||
4541 | if (dir) { | 4552 | if (dir) { |
4542 | trace_btrfs_inode_request(dir); | 4553 | trace_btrfs_inode_request(dir); |
4543 | 4554 | ||
@@ -4583,7 +4594,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4583 | goto fail; | 4594 | goto fail; |
4584 | 4595 | ||
4585 | inode_init_owner(inode, dir, mode); | 4596 | inode_init_owner(inode, dir, mode); |
4586 | inode->i_ino = objectid; | ||
4587 | inode_set_bytes(inode, 0); | 4597 | inode_set_bytes(inode, 0); |
4588 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 4598 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
4589 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], | 4599 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
@@ -4712,10 +4722,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
4712 | if (!new_valid_dev(rdev)) | 4722 | if (!new_valid_dev(rdev)) |
4713 | return -EINVAL; | 4723 | return -EINVAL; |
4714 | 4724 | ||
4715 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
4716 | if (err) | ||
4717 | return err; | ||
4718 | |||
4719 | /* | 4725 | /* |
4720 | * 2 for inode item and ref | 4726 | * 2 for inode item and ref |
4721 | * 2 for dir items | 4727 | * 2 for dir items |
@@ -4727,6 +4733,10 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
4727 | 4733 | ||
4728 | btrfs_set_trans_block_group(trans, dir); | 4734 | btrfs_set_trans_block_group(trans, dir); |
4729 | 4735 | ||
4736 | err = btrfs_find_free_ino(root, &objectid); | ||
4737 | if (err) | ||
4738 | goto out_unlock; | ||
4739 | |||
4730 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 4740 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
4731 | dentry->d_name.len, dir->i_ino, objectid, | 4741 | dentry->d_name.len, dir->i_ino, objectid, |
4732 | BTRFS_I(dir)->block_group, mode, &index); | 4742 | BTRFS_I(dir)->block_group, mode, &index); |
@@ -4774,9 +4784,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
4774 | u64 objectid; | 4784 | u64 objectid; |
4775 | u64 index = 0; | 4785 | u64 index = 0; |
4776 | 4786 | ||
4777 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
4778 | if (err) | ||
4779 | return err; | ||
4780 | /* | 4787 | /* |
4781 | * 2 for inode item and ref | 4788 | * 2 for inode item and ref |
4782 | * 2 for dir items | 4789 | * 2 for dir items |
@@ -4788,6 +4795,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
4788 | 4795 | ||
4789 | btrfs_set_trans_block_group(trans, dir); | 4796 | btrfs_set_trans_block_group(trans, dir); |
4790 | 4797 | ||
4798 | err = btrfs_find_free_ino(root, &objectid); | ||
4799 | if (err) | ||
4800 | goto out_unlock; | ||
4801 | |||
4791 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 4802 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
4792 | dentry->d_name.len, dir->i_ino, objectid, | 4803 | dentry->d_name.len, dir->i_ino, objectid, |
4793 | BTRFS_I(dir)->block_group, mode, &index); | 4804 | BTRFS_I(dir)->block_group, mode, &index); |
@@ -4902,10 +4913,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
4902 | u64 index = 0; | 4913 | u64 index = 0; |
4903 | unsigned long nr = 1; | 4914 | unsigned long nr = 1; |
4904 | 4915 | ||
4905 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
4906 | if (err) | ||
4907 | return err; | ||
4908 | |||
4909 | /* | 4916 | /* |
4910 | * 2 items for inode and ref | 4917 | * 2 items for inode and ref |
4911 | * 2 items for dir items | 4918 | * 2 items for dir items |
@@ -4916,6 +4923,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
4916 | return PTR_ERR(trans); | 4923 | return PTR_ERR(trans); |
4917 | btrfs_set_trans_block_group(trans, dir); | 4924 | btrfs_set_trans_block_group(trans, dir); |
4918 | 4925 | ||
4926 | err = btrfs_find_free_ino(root, &objectid); | ||
4927 | if (err) | ||
4928 | goto out_fail; | ||
4929 | |||
4919 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 4930 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
4920 | dentry->d_name.len, dir->i_ino, objectid, | 4931 | dentry->d_name.len, dir->i_ino, objectid, |
4921 | BTRFS_I(dir)->block_group, S_IFDIR | mode, | 4932 | BTRFS_I(dir)->block_group, S_IFDIR | mode, |
@@ -7257,9 +7268,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
7257 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) | 7268 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) |
7258 | return -ENAMETOOLONG; | 7269 | return -ENAMETOOLONG; |
7259 | 7270 | ||
7260 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
7261 | if (err) | ||
7262 | return err; | ||
7263 | /* | 7271 | /* |
7264 | * 2 items for inode item and ref | 7272 | * 2 items for inode item and ref |
7265 | * 2 items for dir items | 7273 | * 2 items for dir items |
@@ -7271,6 +7279,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
7271 | 7279 | ||
7272 | btrfs_set_trans_block_group(trans, dir); | 7280 | btrfs_set_trans_block_group(trans, dir); |
7273 | 7281 | ||
7282 | err = btrfs_find_free_ino(root, &objectid); | ||
7283 | if (err) | ||
7284 | goto out_unlock; | ||
7285 | |||
7274 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 7286 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
7275 | dentry->d_name.len, dir->i_ino, objectid, | 7287 | dentry->d_name.len, dir->i_ino, objectid, |
7276 | BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, | 7288 | BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f580a3a5d2fc..e1835f8eec93 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include "print-tree.h" | 50 | #include "print-tree.h" |
51 | #include "volumes.h" | 51 | #include "volumes.h" |
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "inode-map.h" | ||
53 | 54 | ||
54 | /* Mask out flags that are inappropriate for the given type of inode. */ | 55 | /* Mask out flags that are inappropriate for the given type of inode. */ |
55 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 56 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
@@ -323,8 +324,7 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
323 | u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; | 324 | u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; |
324 | u64 index = 0; | 325 | u64 index = 0; |
325 | 326 | ||
326 | ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, | 327 | ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); |
327 | 0, &objectid); | ||
328 | if (ret) { | 328 | if (ret) { |
329 | dput(parent); | 329 | dput(parent); |
330 | return ret; | 330 | return ret; |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 58250e09eb05..e6cb89357256 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "btrfs_inode.h" | 30 | #include "btrfs_inode.h" |
31 | #include "async-thread.h" | 31 | #include "async-thread.h" |
32 | #include "free-space-cache.h" | 32 | #include "free-space-cache.h" |
33 | #include "inode-map.h" | ||
33 | 34 | ||
34 | /* | 35 | /* |
35 | * backref_node, mapping_node and tree_block start with this | 36 | * backref_node, mapping_node and tree_block start with this |
@@ -3897,7 +3898,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, | |||
3897 | if (IS_ERR(trans)) | 3898 | if (IS_ERR(trans)) |
3898 | return ERR_CAST(trans); | 3899 | return ERR_CAST(trans); |
3899 | 3900 | ||
3900 | err = btrfs_find_free_objectid(trans, root, objectid, &objectid); | 3901 | err = btrfs_find_free_objectid(root, &objectid); |
3901 | if (err) | 3902 | if (err) |
3902 | goto out; | 3903 | goto out; |
3903 | 3904 | ||
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..aef6c81e7101 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "transaction.h" | 27 | #include "transaction.h" |
28 | #include "locking.h" | 28 | #include "locking.h" |
29 | #include "tree-log.h" | 29 | #include "tree-log.h" |
30 | #include "inode-map.h" | ||
30 | 31 | ||
31 | #define BTRFS_ROOT_TRANS_TAG 0 | 32 | #define BTRFS_ROOT_TRANS_TAG 0 |
32 | 33 | ||
@@ -761,7 +762,11 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
761 | btrfs_orphan_commit_root(trans, root); | 762 | btrfs_orphan_commit_root(trans, root); |
762 | 763 | ||
763 | if (root->commit_root != root->node) { | 764 | if (root->commit_root != root->node) { |
765 | mutex_lock(&root->fs_commit_mutex); | ||
764 | switch_commit_root(root); | 766 | switch_commit_root(root); |
767 | btrfs_unpin_free_ino(root); | ||
768 | mutex_unlock(&root->fs_commit_mutex); | ||
769 | |||
765 | btrfs_set_root_node(&root->root_item, | 770 | btrfs_set_root_node(&root->root_item, |
766 | root->node); | 771 | root->node); |
767 | } | 772 | } |
@@ -930,7 +935,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
930 | goto fail; | 935 | goto fail; |
931 | } | 936 | } |
932 | 937 | ||
933 | ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); | 938 | ret = btrfs_find_free_objectid(tree_root, &objectid); |
934 | if (ret) { | 939 | if (ret) { |
935 | pending->error = ret; | 940 | pending->error = ret; |
936 | goto fail; | 941 | goto fail; |