Diffstat:
 -rw-r--r--  fs/btrfs/btrfs_inode.h      |   9
 -rw-r--r--  fs/btrfs/compression.c      |   5
 -rw-r--r--  fs/btrfs/ctree.h            |  29
 -rw-r--r--  fs/btrfs/disk-io.c          |  19
 -rw-r--r--  fs/btrfs/export.c           |  25
 -rw-r--r--  fs/btrfs/extent-tree.c      |  50
 -rw-r--r--  fs/btrfs/extent_io.c        |   4
 -rw-r--r--  fs/btrfs/file-item.c        |   5
 -rw-r--r--  fs/btrfs/file.c             |  27
 -rw-r--r--  fs/btrfs/free-space-cache.c | 976
 -rw-r--r--  fs/btrfs/free-space-cache.h |  48
 -rw-r--r--  fs/btrfs/inode-map.c        | 428
 -rw-r--r--  fs/btrfs/inode-map.h        |  13
 -rw-r--r--  fs/btrfs/inode.c            | 282
 -rw-r--r--  fs/btrfs/ioctl.c            |  22
 -rw-r--r--  fs/btrfs/relocation.c       |  27
 -rw-r--r--  fs/btrfs/transaction.c      |  13
 -rw-r--r--  fs/btrfs/tree-log.c         |  54
 -rw-r--r--  fs/btrfs/xattr.c            |   8
 19 files changed, 1407 insertions, 637 deletions
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 57c3bb2884c..8842a4195f9 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -166,6 +166,15 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
166 return container_of(inode, struct btrfs_inode, vfs_inode); 166 return container_of(inode, struct btrfs_inode, vfs_inode);
167} 167}
168 168
169static inline u64 btrfs_ino(struct inode *inode)
170{
171 u64 ino = BTRFS_I(inode)->location.objectid;
172
173 if (ino <= BTRFS_FIRST_FREE_OBJECTID)
174 ino = inode->i_ino;
175 return ino;
176}
177
169static inline void btrfs_i_size_write(struct inode *inode, u64 size) 178static inline void btrfs_i_size_write(struct inode *inode, u64 size)
170{ 179{
171 i_size_write(inode, size); 180 i_size_write(inode, size);
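
For context, a small illustrative sketch (not part of the patch): the remaining files in this diff convert callers that used to key tree searches on inode->i_ino over to the new btrfs_ino() helper, which prefers the objectid recorded in BTRFS_I(inode)->location and only falls back to i_ino when that objectid is at or below BTRFS_FIRST_FREE_OBJECTID. A hypothetical caller would build a search key like this:

        /* hypothetical example, only to illustrate the conversion pattern */
        static void example_setup_key(struct inode *inode,
                                      struct btrfs_key *key, u64 start)
        {
                key->objectid = btrfs_ino(inode);       /* was: inode->i_ino */
                key->type = BTRFS_EXTENT_DATA_KEY;
                key->offset = start;
        }
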
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 41d1d7c70e2..369d5068ac7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
125 kunmap_atomic(kaddr, KM_USER0); 125 kunmap_atomic(kaddr, KM_USER0);
126 126
127 if (csum != *cb_sum) { 127 if (csum != *cb_sum) {
128 printk(KERN_INFO "btrfs csum failed ino %lu " 128 printk(KERN_INFO "btrfs csum failed ino %llu "
129 "extent %llu csum %u " 129 "extent %llu csum %u "
130 "wanted %u mirror %d\n", inode->i_ino, 130 "wanted %u mirror %d\n",
131 (unsigned long long)btrfs_ino(inode),
131 (unsigned long long)disk_start, 132 (unsigned long long)disk_start,
132 csum, *cb_sum, cb->mirror_num); 133 csum, *cb_sum, cb->mirror_num);
133 ret = -EIO; 134 ret = -EIO;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8f4b81de3ae..11a103db286 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -105,6 +105,12 @@ struct btrfs_ordered_sum;
105/* For storing free space cache */ 105/* For storing free space cache */
106#define BTRFS_FREE_SPACE_OBJECTID -11ULL 106#define BTRFS_FREE_SPACE_OBJECTID -11ULL
107 107
108/*
 109 * The inode number assigned to the special inode for storing
110 * free ino cache
111 */
112#define BTRFS_FREE_INO_OBJECTID -12ULL
113
108/* dummy objectid represents multiple objectids */ 114/* dummy objectid represents multiple objectids */
109#define BTRFS_MULTIPLE_OBJECTIDS -255ULL 115#define BTRFS_MULTIPLE_OBJECTIDS -255ULL
110 116
@@ -830,9 +836,6 @@ struct btrfs_block_group_cache {
830 u64 bytes_super; 836 u64 bytes_super;
831 u64 flags; 837 u64 flags;
832 u64 sectorsize; 838 u64 sectorsize;
833 int extents_thresh;
834 int free_extents;
835 int total_bitmaps;
836 unsigned int ro:1; 839 unsigned int ro:1;
837 unsigned int dirty:1; 840 unsigned int dirty:1;
838 unsigned int iref:1; 841 unsigned int iref:1;
@@ -847,9 +850,7 @@ struct btrfs_block_group_cache {
847 struct btrfs_space_info *space_info; 850 struct btrfs_space_info *space_info;
848 851
849 /* free space cache stuff */ 852 /* free space cache stuff */
850 spinlock_t tree_lock; 853 struct btrfs_free_space_ctl *free_space_ctl;
851 struct rb_root free_space_offset;
852 u64 free_space;
853 854
854 /* block group cache stuff */ 855 /* block group cache stuff */
855 struct rb_node cache_node; 856 struct rb_node cache_node;
@@ -1107,6 +1108,16 @@ struct btrfs_root {
1107 spinlock_t accounting_lock; 1108 spinlock_t accounting_lock;
1108 struct btrfs_block_rsv *block_rsv; 1109 struct btrfs_block_rsv *block_rsv;
1109 1110
1111 /* free ino cache stuff */
1112 struct mutex fs_commit_mutex;
1113 struct btrfs_free_space_ctl *free_ino_ctl;
1114 enum btrfs_caching_type cached;
1115 spinlock_t cache_lock;
1116 wait_queue_head_t cache_wait;
1117 struct btrfs_free_space_ctl *free_ino_pinned;
1118 u64 cache_progress;
1119 struct inode *cache_inode;
1120
1110 struct mutex log_mutex; 1121 struct mutex log_mutex;
1111 wait_queue_head_t log_writer_wait; 1122 wait_queue_head_t log_writer_wait;
1112 wait_queue_head_t log_commit_wait[2]; 1123 wait_queue_head_t log_commit_wait[2];
@@ -2413,12 +2424,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
2413 struct btrfs_root *root, u64 offset); 2424 struct btrfs_root *root, u64 offset);
2414int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); 2425int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
2415 2426
2416/* inode-map.c */
2417int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
2418 struct btrfs_root *fs_root,
2419 u64 dirid, u64 *objectid);
2420int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
2421
2422/* inode-item.c */ 2427/* inode-item.c */
2423int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, 2428int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
2424 struct btrfs_root *root, 2429 struct btrfs_root *root,
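
A quick aside on the new constant (an illustrative check, not from the patch): like BTRFS_FREE_SPACE_OBJECTID, the free ino cache objectid is a negative value stored in a u64, so it wraps to the very top of the keyspace, far above BTRFS_FIRST_FREE_OBJECTID (256) where regular inode numbers begin:

        /* standalone user-space check of the wrap-around, for illustration */
        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t free_space_objectid = (uint64_t)-11; /* BTRFS_FREE_SPACE_OBJECTID */
                uint64_t free_ino_objectid = (uint64_t)-12;   /* BTRFS_FREE_INO_OBJECTID */

                assert(free_space_objectid == 0xfffffffffffffff5ULL);
                assert(free_ino_objectid == 0xfffffffffffffff4ULL);
                return 0;
        }
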
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 228cf36ece8..ac1cd20d1c0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -41,6 +41,7 @@
41#include "locking.h" 41#include "locking.h"
42#include "tree-log.h" 42#include "tree-log.h"
43#include "free-space-cache.h" 43#include "free-space-cache.h"
44#include "inode-map.h"
44 45
45static struct extent_io_ops btree_extent_io_ops; 46static struct extent_io_ops btree_extent_io_ops;
46static void end_workqueue_fn(struct btrfs_work *work); 47static void end_workqueue_fn(struct btrfs_work *work);
@@ -1326,6 +1327,19 @@ again:
1326 if (IS_ERR(root)) 1327 if (IS_ERR(root))
1327 return root; 1328 return root;
1328 1329
1330 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1331 if (!root->free_ino_ctl)
1332 goto fail;
1333 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1334 GFP_NOFS);
1335 if (!root->free_ino_pinned)
1336 goto fail;
1337
1338 btrfs_init_free_ino_ctl(root);
1339 mutex_init(&root->fs_commit_mutex);
1340 spin_lock_init(&root->cache_lock);
1341 init_waitqueue_head(&root->cache_wait);
1342
1329 set_anon_super(&root->anon_super, NULL); 1343 set_anon_super(&root->anon_super, NULL);
1330 1344
1331 if (btrfs_root_refs(&root->root_item) == 0) { 1345 if (btrfs_root_refs(&root->root_item) == 0) {
@@ -2404,12 +2418,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2404 if (btrfs_root_refs(&root->root_item) == 0) 2418 if (btrfs_root_refs(&root->root_item) == 0)
2405 synchronize_srcu(&fs_info->subvol_srcu); 2419 synchronize_srcu(&fs_info->subvol_srcu);
2406 2420
2421 __btrfs_remove_free_space_cache(root->free_ino_pinned);
2422 __btrfs_remove_free_space_cache(root->free_ino_ctl);
2407 free_fs_root(root); 2423 free_fs_root(root);
2408 return 0; 2424 return 0;
2409} 2425}
2410 2426
2411static void free_fs_root(struct btrfs_root *root) 2427static void free_fs_root(struct btrfs_root *root)
2412{ 2428{
2429 iput(root->cache_inode);
2413 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 2430 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2414 if (root->anon_super.s_dev) { 2431 if (root->anon_super.s_dev) {
2415 down_write(&root->anon_super.s_umount); 2432 down_write(&root->anon_super.s_umount);
@@ -2417,6 +2434,8 @@ static void free_fs_root(struct btrfs_root *root)
2417 } 2434 }
2418 free_extent_buffer(root->node); 2435 free_extent_buffer(root->node);
2419 free_extent_buffer(root->commit_root); 2436 free_extent_buffer(root->commit_root);
2437 kfree(root->free_ino_ctl);
2438 kfree(root->free_ino_pinned);
2420 kfree(root->name); 2439 kfree(root->name);
2421 kfree(root); 2440 kfree(root);
2422} 2441}
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index b4ffad859ad..1b8dc33778f 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
32 len = BTRFS_FID_SIZE_NON_CONNECTABLE; 32 len = BTRFS_FID_SIZE_NON_CONNECTABLE;
33 type = FILEID_BTRFS_WITHOUT_PARENT; 33 type = FILEID_BTRFS_WITHOUT_PARENT;
34 34
35 fid->objectid = inode->i_ino; 35 fid->objectid = btrfs_ino(inode);
36 fid->root_objectid = BTRFS_I(inode)->root->objectid; 36 fid->root_objectid = BTRFS_I(inode)->root->objectid;
37 fid->gen = inode->i_generation; 37 fid->gen = inode->i_generation;
38 38
@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
178 if (!path) 178 if (!path)
179 return ERR_PTR(-ENOMEM); 179 return ERR_PTR(-ENOMEM);
180 180
181 if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 181 if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
182 key.objectid = root->root_key.objectid; 182 key.objectid = root->root_key.objectid;
183 key.type = BTRFS_ROOT_BACKREF_KEY; 183 key.type = BTRFS_ROOT_BACKREF_KEY;
184 key.offset = (u64)-1; 184 key.offset = (u64)-1;
185 root = root->fs_info->tree_root; 185 root = root->fs_info->tree_root;
186 } else { 186 } else {
187 key.objectid = dir->i_ino; 187 key.objectid = btrfs_ino(dir);
188 key.type = BTRFS_INODE_REF_KEY; 188 key.type = BTRFS_INODE_REF_KEY;
189 key.offset = (u64)-1; 189 key.offset = (u64)-1;
190 } 190 }
@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
244 struct btrfs_key key; 244 struct btrfs_key key;
245 int name_len; 245 int name_len;
246 int ret; 246 int ret;
247 u64 ino;
247 248
248 if (!dir || !inode) 249 if (!dir || !inode)
249 return -EINVAL; 250 return -EINVAL;
@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
251 if (!S_ISDIR(dir->i_mode)) 252 if (!S_ISDIR(dir->i_mode))
252 return -EINVAL; 253 return -EINVAL;
253 254
255 ino = btrfs_ino(inode);
256
254 path = btrfs_alloc_path(); 257 path = btrfs_alloc_path();
255 if (!path) 258 if (!path)
256 return -ENOMEM; 259 return -ENOMEM;
257 path->leave_spinning = 1; 260 path->leave_spinning = 1;
258 261
259 if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 262 if (ino == BTRFS_FIRST_FREE_OBJECTID) {
260 key.objectid = BTRFS_I(inode)->root->root_key.objectid; 263 key.objectid = BTRFS_I(inode)->root->root_key.objectid;
261 key.type = BTRFS_ROOT_BACKREF_KEY; 264 key.type = BTRFS_ROOT_BACKREF_KEY;
262 key.offset = (u64)-1; 265 key.offset = (u64)-1;
263 root = root->fs_info->tree_root; 266 root = root->fs_info->tree_root;
264 } else { 267 } else {
265 key.objectid = inode->i_ino; 268 key.objectid = ino;
266 key.offset = dir->i_ino; 269 key.offset = btrfs_ino(dir);
267 key.type = BTRFS_INODE_REF_KEY; 270 key.type = BTRFS_INODE_REF_KEY;
268 } 271 }
269 272
@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
272 btrfs_free_path(path); 275 btrfs_free_path(path);
273 return ret; 276 return ret;
274 } else if (ret > 0) { 277 } else if (ret > 0) {
275 if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 278 if (ino == BTRFS_FIRST_FREE_OBJECTID) {
276 path->slots[0]--; 279 path->slots[0]--;
277 } else { 280 } else {
278 btrfs_free_path(path); 281 btrfs_free_path(path);
@@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
281 } 284 }
282 leaf = path->nodes[0]; 285 leaf = path->nodes[0];
283 286
284 if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { 287 if (ino == BTRFS_FIRST_FREE_OBJECTID) {
285 rref = btrfs_item_ptr(leaf, path->slots[0], 288 rref = btrfs_item_ptr(leaf, path->slots[0],
286 struct btrfs_root_ref); 289 struct btrfs_root_ref);
287 name_ptr = (unsigned long)(rref + 1); 290 name_ptr = (unsigned long)(rref + 1);
288 name_len = btrfs_root_ref_name_len(leaf, rref); 291 name_len = btrfs_root_ref_name_len(leaf, rref);
289 } else { 292 } else {
290 iref = btrfs_item_ptr(leaf, path->slots[0], 293 iref = btrfs_item_ptr(leaf, path->slots[0],
291 struct btrfs_inode_ref); 294 struct btrfs_inode_ref);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9ee6bd55e16..e530c20989f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
105 WARN_ON(cache->pinned > 0); 105 WARN_ON(cache->pinned > 0);
106 WARN_ON(cache->reserved > 0); 106 WARN_ON(cache->reserved > 0);
107 WARN_ON(cache->reserved_pinned > 0); 107 WARN_ON(cache->reserved_pinned > 0);
108 kfree(cache->free_space_ctl);
108 kfree(cache); 109 kfree(cache);
109 } 110 }
110} 111}
@@ -3144,7 +3145,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3144 /* make sure bytes are sectorsize aligned */ 3145 /* make sure bytes are sectorsize aligned */
3145 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 3146 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3146 3147
3147 if (root == root->fs_info->tree_root) { 3148 if (root == root->fs_info->tree_root ||
3149 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3148 alloc_chunk = 0; 3150 alloc_chunk = 0;
3149 committed = 1; 3151 committed = 1;
3150 } 3152 }
@@ -4893,7 +4895,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4893 return 0; 4895 return 0;
4894 4896
4895 wait_event(caching_ctl->wait, block_group_cache_done(cache) || 4897 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4896 (cache->free_space >= num_bytes)); 4898 (cache->free_space_ctl->free_space >= num_bytes));
4897 4899
4898 put_caching_control(caching_ctl); 4900 put_caching_control(caching_ctl);
4899 return 0; 4901 return 0;
@@ -7008,8 +7010,8 @@ static noinline int get_new_locations(struct inode *reloc_inode,
7008 7010
7009 cur_pos = extent_key->objectid - offset; 7011 cur_pos = extent_key->objectid - offset;
7010 last_byte = extent_key->objectid + extent_key->offset; 7012 last_byte = extent_key->objectid + extent_key->offset;
7011 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, 7013 ret = btrfs_lookup_file_extent(NULL, root, path,
7012 cur_pos, 0); 7014 btrfs_ino(reloc_inode), cur_pos, 0);
7013 if (ret < 0) 7015 if (ret < 0)
7014 goto out; 7016 goto out;
7015 if (ret > 0) { 7017 if (ret > 0) {
@@ -7032,7 +7034,7 @@ static noinline int get_new_locations(struct inode *reloc_inode,
7032 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 7034 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7033 if (found_key.offset != cur_pos || 7035 if (found_key.offset != cur_pos ||
7034 found_key.type != BTRFS_EXTENT_DATA_KEY || 7036 found_key.type != BTRFS_EXTENT_DATA_KEY ||
7035 found_key.objectid != reloc_inode->i_ino) 7037 found_key.objectid != btrfs_ino(reloc_inode))
7036 break; 7038 break;
7037 7039
7038 fi = btrfs_item_ptr(leaf, path->slots[0], 7040 fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -7178,7 +7180,7 @@ next:
7178 break; 7180 break;
7179 } 7181 }
7180 7182
7181 if (inode && key.objectid != inode->i_ino) { 7183 if (inode && key.objectid != btrfs_ino(inode)) {
7182 BUG_ON(extent_locked); 7184 BUG_ON(extent_locked);
7183 btrfs_release_path(root, path); 7185 btrfs_release_path(root, path);
7184 mutex_unlock(&inode->i_mutex); 7186 mutex_unlock(&inode->i_mutex);
@@ -7487,7 +7489,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root,
7487 continue; 7489 continue;
7488 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) 7490 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7489 continue; 7491 continue;
7490 if (!inode || inode->i_ino != key.objectid) { 7492 if (!inode || btrfs_ino(inode) != key.objectid) {
7491 iput(inode); 7493 iput(inode);
7492 inode = btrfs_ilookup(target_root->fs_info->sb, 7494 inode = btrfs_ilookup(target_root->fs_info->sb,
7493 key.objectid, target_root, 1); 7495 key.objectid, target_root, 1);
@@ -8555,10 +8557,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
8555 ret = -ENOMEM; 8557 ret = -ENOMEM;
8556 goto error; 8558 goto error;
8557 } 8559 }
8560 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8561 GFP_NOFS);
8562 if (!cache->free_space_ctl) {
8563 kfree(cache);
8564 ret = -ENOMEM;
8565 goto error;
8566 }
8558 8567
8559 atomic_set(&cache->count, 1); 8568 atomic_set(&cache->count, 1);
8560 spin_lock_init(&cache->lock); 8569 spin_lock_init(&cache->lock);
8561 spin_lock_init(&cache->tree_lock);
8562 cache->fs_info = info; 8570 cache->fs_info = info;
8563 INIT_LIST_HEAD(&cache->list); 8571 INIT_LIST_HEAD(&cache->list);
8564 INIT_LIST_HEAD(&cache->cluster_list); 8572 INIT_LIST_HEAD(&cache->cluster_list);
@@ -8566,14 +8574,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
8566 if (need_clear) 8574 if (need_clear)
8567 cache->disk_cache_state = BTRFS_DC_CLEAR; 8575 cache->disk_cache_state = BTRFS_DC_CLEAR;
8568 8576
8569 /*
8570 * we only want to have 32k of ram per block group for keeping
8571 * track of free space, and if we pass 1/2 of that we want to
8572 * start converting things over to using bitmaps
8573 */
8574 cache->extents_thresh = ((1024 * 32) / 2) /
8575 sizeof(struct btrfs_free_space);
8576
8577 read_extent_buffer(leaf, &cache->item, 8577 read_extent_buffer(leaf, &cache->item,
8578 btrfs_item_ptr_offset(leaf, path->slots[0]), 8578 btrfs_item_ptr_offset(leaf, path->slots[0]),
8579 sizeof(cache->item)); 8579 sizeof(cache->item));
@@ -8584,6 +8584,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
8584 cache->flags = btrfs_block_group_flags(&cache->item); 8584 cache->flags = btrfs_block_group_flags(&cache->item);
8585 cache->sectorsize = root->sectorsize; 8585 cache->sectorsize = root->sectorsize;
8586 8586
8587 btrfs_init_free_space_ctl(cache);
8588
8587 /* 8589 /*
8588 * We need to exclude the super stripes now so that the space 8590 * We need to exclude the super stripes now so that the space
8589 * info has super bytes accounted for, otherwise we'll think 8591 * info has super bytes accounted for, otherwise we'll think
@@ -8670,6 +8672,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8670 cache = kzalloc(sizeof(*cache), GFP_NOFS); 8672 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8671 if (!cache) 8673 if (!cache)
8672 return -ENOMEM; 8674 return -ENOMEM;
8675 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8676 GFP_NOFS);
8677 if (!cache->free_space_ctl) {
8678 kfree(cache);
8679 return -ENOMEM;
8680 }
8673 8681
8674 cache->key.objectid = chunk_offset; 8682 cache->key.objectid = chunk_offset;
8675 cache->key.offset = size; 8683 cache->key.offset = size;
@@ -8677,19 +8685,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8677 cache->sectorsize = root->sectorsize; 8685 cache->sectorsize = root->sectorsize;
8678 cache->fs_info = root->fs_info; 8686 cache->fs_info = root->fs_info;
8679 8687
8680 /*
8681 * we only want to have 32k of ram per block group for keeping track
8682 * of free space, and if we pass 1/2 of that we want to start
8683 * converting things over to using bitmaps
8684 */
8685 cache->extents_thresh = ((1024 * 32) / 2) /
8686 sizeof(struct btrfs_free_space);
8687 atomic_set(&cache->count, 1); 8688 atomic_set(&cache->count, 1);
8688 spin_lock_init(&cache->lock); 8689 spin_lock_init(&cache->lock);
8689 spin_lock_init(&cache->tree_lock);
8690 INIT_LIST_HEAD(&cache->list); 8690 INIT_LIST_HEAD(&cache->list);
8691 INIT_LIST_HEAD(&cache->cluster_list); 8691 INIT_LIST_HEAD(&cache->cluster_list);
8692 8692
8693 btrfs_init_free_space_ctl(cache);
8694
8693 btrfs_set_block_group_used(&cache->item, bytes_used); 8695 btrfs_set_block_group_used(&cache->item, bytes_used);
8694 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); 8696 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8695 cache->flags = type; 8697 cache->flags = type;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ba41da59e31..4bf90abea3d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3030,7 +3030,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3030 * because there might be preallocation past i_size 3030 * because there might be preallocation past i_size
3031 */ 3031 */
3032 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, 3032 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
3033 path, inode->i_ino, -1, 0); 3033 path, btrfs_ino(inode), -1, 0);
3034 if (ret < 0) { 3034 if (ret < 0) {
3035 btrfs_free_path(path); 3035 btrfs_free_path(path);
3036 return ret; 3036 return ret;
@@ -3043,7 +3043,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3043 found_type = btrfs_key_type(&found_key); 3043 found_type = btrfs_key_type(&found_key);
3044 3044
3045 /* No extents, but there might be delalloc bits */ 3045 /* No extents, but there might be delalloc bits */
3046 if (found_key.objectid != inode->i_ino || 3046 if (found_key.objectid != btrfs_ino(inode) ||
3047 found_type != BTRFS_EXTENT_DATA_KEY) { 3047 found_type != BTRFS_EXTENT_DATA_KEY) {
3048 /* have to trust i_size as the end */ 3048 /* have to trust i_size as the end */
3049 last = (u64)-1; 3049 last = (u64)-1;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a6a9d4e8b49..1d9410e3921 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -208,8 +208,9 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
208 EXTENT_NODATASUM, GFP_NOFS); 208 EXTENT_NODATASUM, GFP_NOFS);
209 } else { 209 } else {
210 printk(KERN_INFO "btrfs no csum found " 210 printk(KERN_INFO "btrfs no csum found "
211 "for inode %lu start %llu\n", 211 "for inode %llu start %llu\n",
212 inode->i_ino, 212 (unsigned long long)
213 btrfs_ino(inode),
213 (unsigned long long)offset); 214 (unsigned long long)offset);
214 } 215 }
215 item = NULL; 216 item = NULL;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 75899a01dde..bef02045152 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -298,6 +298,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
298 struct btrfs_path *path; 298 struct btrfs_path *path;
299 struct btrfs_key key; 299 struct btrfs_key key;
300 struct btrfs_key new_key; 300 struct btrfs_key new_key;
301 u64 ino = btrfs_ino(inode);
301 u64 search_start = start; 302 u64 search_start = start;
302 u64 disk_bytenr = 0; 303 u64 disk_bytenr = 0;
303 u64 num_bytes = 0; 304 u64 num_bytes = 0;
@@ -318,14 +319,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
318 319
319 while (1) { 320 while (1) {
320 recow = 0; 321 recow = 0;
321 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 322 ret = btrfs_lookup_file_extent(trans, root, path, ino,
322 search_start, -1); 323 search_start, -1);
323 if (ret < 0) 324 if (ret < 0)
324 break; 325 break;
325 if (ret > 0 && path->slots[0] > 0 && search_start == start) { 326 if (ret > 0 && path->slots[0] > 0 && search_start == start) {
326 leaf = path->nodes[0]; 327 leaf = path->nodes[0];
327 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); 328 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
328 if (key.objectid == inode->i_ino && 329 if (key.objectid == ino &&
329 key.type == BTRFS_EXTENT_DATA_KEY) 330 key.type == BTRFS_EXTENT_DATA_KEY)
330 path->slots[0]--; 331 path->slots[0]--;
331 } 332 }
@@ -346,7 +347,7 @@ next_slot:
346 } 347 }
347 348
348 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 349 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
349 if (key.objectid > inode->i_ino || 350 if (key.objectid > ino ||
350 key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) 351 key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
351 break; 352 break;
352 353
@@ -592,6 +593,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
592 int del_slot = 0; 593 int del_slot = 0;
593 int recow; 594 int recow;
594 int ret; 595 int ret;
596 u64 ino = btrfs_ino(inode);
595 597
596 btrfs_drop_extent_cache(inode, start, end - 1, 0); 598 btrfs_drop_extent_cache(inode, start, end - 1, 0);
597 599
@@ -600,7 +602,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
600again: 602again:
601 recow = 0; 603 recow = 0;
602 split = start; 604 split = start;
603 key.objectid = inode->i_ino; 605 key.objectid = ino;
604 key.type = BTRFS_EXTENT_DATA_KEY; 606 key.type = BTRFS_EXTENT_DATA_KEY;
605 key.offset = split; 607 key.offset = split;
606 608
@@ -612,8 +614,7 @@ again:
612 614
613 leaf = path->nodes[0]; 615 leaf = path->nodes[0];
614 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 616 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
615 BUG_ON(key.objectid != inode->i_ino || 617 BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
616 key.type != BTRFS_EXTENT_DATA_KEY);
617 fi = btrfs_item_ptr(leaf, path->slots[0], 618 fi = btrfs_item_ptr(leaf, path->slots[0],
618 struct btrfs_file_extent_item); 619 struct btrfs_file_extent_item);
619 BUG_ON(btrfs_file_extent_type(leaf, fi) != 620 BUG_ON(btrfs_file_extent_type(leaf, fi) !=
@@ -630,7 +631,7 @@ again:
630 other_start = 0; 631 other_start = 0;
631 other_end = start; 632 other_end = start;
632 if (extent_mergeable(leaf, path->slots[0] - 1, 633 if (extent_mergeable(leaf, path->slots[0] - 1,
633 inode->i_ino, bytenr, orig_offset, 634 ino, bytenr, orig_offset,
634 &other_start, &other_end)) { 635 &other_start, &other_end)) {
635 new_key.offset = end; 636 new_key.offset = end;
636 btrfs_set_item_key_safe(trans, root, path, &new_key); 637 btrfs_set_item_key_safe(trans, root, path, &new_key);
@@ -653,7 +654,7 @@ again:
653 other_start = end; 654 other_start = end;
654 other_end = 0; 655 other_end = 0;
655 if (extent_mergeable(leaf, path->slots[0] + 1, 656 if (extent_mergeable(leaf, path->slots[0] + 1,
656 inode->i_ino, bytenr, orig_offset, 657 ino, bytenr, orig_offset,
657 &other_start, &other_end)) { 658 &other_start, &other_end)) {
658 fi = btrfs_item_ptr(leaf, path->slots[0], 659 fi = btrfs_item_ptr(leaf, path->slots[0],
659 struct btrfs_file_extent_item); 660 struct btrfs_file_extent_item);
@@ -702,7 +703,7 @@ again:
702 703
703 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, 704 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
704 root->root_key.objectid, 705 root->root_key.objectid,
705 inode->i_ino, orig_offset); 706 ino, orig_offset);
706 BUG_ON(ret); 707 BUG_ON(ret);
707 708
708 if (split == start) { 709 if (split == start) {
@@ -718,7 +719,7 @@ again:
718 other_start = end; 719 other_start = end;
719 other_end = 0; 720 other_end = 0;
720 if (extent_mergeable(leaf, path->slots[0] + 1, 721 if (extent_mergeable(leaf, path->slots[0] + 1,
721 inode->i_ino, bytenr, orig_offset, 722 ino, bytenr, orig_offset,
722 &other_start, &other_end)) { 723 &other_start, &other_end)) {
723 if (recow) { 724 if (recow) {
724 btrfs_release_path(root, path); 725 btrfs_release_path(root, path);
@@ -729,13 +730,13 @@ again:
729 del_nr++; 730 del_nr++;
730 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 731 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
731 0, root->root_key.objectid, 732 0, root->root_key.objectid,
732 inode->i_ino, orig_offset); 733 ino, orig_offset);
733 BUG_ON(ret); 734 BUG_ON(ret);
734 } 735 }
735 other_start = 0; 736 other_start = 0;
736 other_end = start; 737 other_end = start;
737 if (extent_mergeable(leaf, path->slots[0] - 1, 738 if (extent_mergeable(leaf, path->slots[0] - 1,
738 inode->i_ino, bytenr, orig_offset, 739 ino, bytenr, orig_offset,
739 &other_start, &other_end)) { 740 &other_start, &other_end)) {
740 if (recow) { 741 if (recow) {
741 btrfs_release_path(root, path); 742 btrfs_release_path(root, path);
@@ -746,7 +747,7 @@ again:
746 del_nr++; 747 del_nr++;
747 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 748 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
748 0, root->root_key.objectid, 749 0, root->root_key.objectid,
749 inode->i_ino, orig_offset); 750 ino, orig_offset);
750 BUG_ON(ret); 751 BUG_ON(ret);
751 } 752 }
752 if (del_nr == 0) { 753 if (del_nr == 0) {
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 63731a1fb0a..25a13ab750f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -25,18 +25,17 @@
25#include "transaction.h" 25#include "transaction.h"
26#include "disk-io.h" 26#include "disk-io.h"
27#include "extent_io.h" 27#include "extent_io.h"
28#include "inode-map.h"
28 29
29#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 30#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
30#define MAX_CACHE_BYTES_PER_GIG (32 * 1024) 31#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
31 32
32static void recalculate_thresholds(struct btrfs_block_group_cache 33static int link_free_space(struct btrfs_free_space_ctl *ctl,
33 *block_group);
34static int link_free_space(struct btrfs_block_group_cache *block_group,
35 struct btrfs_free_space *info); 34 struct btrfs_free_space *info);
36 35
37struct inode *lookup_free_space_inode(struct btrfs_root *root, 36static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
38 struct btrfs_block_group_cache 37 struct btrfs_path *path,
39 *block_group, struct btrfs_path *path) 38 u64 offset)
40{ 39{
41 struct btrfs_key key; 40 struct btrfs_key key;
42 struct btrfs_key location; 41 struct btrfs_key location;
@@ -46,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
46 struct inode *inode = NULL; 45 struct inode *inode = NULL;
47 int ret; 46 int ret;
48 47
49 spin_lock(&block_group->lock);
50 if (block_group->inode)
51 inode = igrab(block_group->inode);
52 spin_unlock(&block_group->lock);
53 if (inode)
54 return inode;
55
56 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 48 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
57 key.offset = block_group->key.objectid; 49 key.offset = offset;
58 key.type = 0; 50 key.type = 0;
59 51
60 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 52 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
84 76
85 inode->i_mapping->flags &= ~__GFP_FS; 77 inode->i_mapping->flags &= ~__GFP_FS;
86 78
79 return inode;
80}
81
82struct inode *lookup_free_space_inode(struct btrfs_root *root,
83 struct btrfs_block_group_cache
84 *block_group, struct btrfs_path *path)
85{
86 struct inode *inode = NULL;
87
88 spin_lock(&block_group->lock);
89 if (block_group->inode)
90 inode = igrab(block_group->inode);
91 spin_unlock(&block_group->lock);
92 if (inode)
93 return inode;
94
95 inode = __lookup_free_space_inode(root, path,
96 block_group->key.objectid);
97 if (IS_ERR(inode))
98 return inode;
99
87 spin_lock(&block_group->lock); 100 spin_lock(&block_group->lock);
88 if (!root->fs_info->closing) { 101 if (!root->fs_info->closing) {
89 block_group->inode = igrab(inode); 102 block_group->inode = igrab(inode);
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
94 return inode; 107 return inode;
95} 108}
96 109
97int create_free_space_inode(struct btrfs_root *root, 110int __create_free_space_inode(struct btrfs_root *root,
98 struct btrfs_trans_handle *trans, 111 struct btrfs_trans_handle *trans,
99 struct btrfs_block_group_cache *block_group, 112 struct btrfs_path *path, u64 ino, u64 offset)
100 struct btrfs_path *path)
101{ 113{
102 struct btrfs_key key; 114 struct btrfs_key key;
103 struct btrfs_disk_key disk_key; 115 struct btrfs_disk_key disk_key;
104 struct btrfs_free_space_header *header; 116 struct btrfs_free_space_header *header;
105 struct btrfs_inode_item *inode_item; 117 struct btrfs_inode_item *inode_item;
106 struct extent_buffer *leaf; 118 struct extent_buffer *leaf;
107 u64 objectid;
108 int ret; 119 int ret;
109 120
110 ret = btrfs_find_free_objectid(trans, root, 0, &objectid); 121 ret = btrfs_insert_empty_inode(trans, root, path, ino);
111 if (ret < 0)
112 return ret;
113
114 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
115 if (ret) 122 if (ret)
116 return ret; 123 return ret;
117 124
@@ -131,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root,
131 BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); 138 BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
132 btrfs_set_inode_nlink(leaf, inode_item, 1); 139 btrfs_set_inode_nlink(leaf, inode_item, 1);
133 btrfs_set_inode_transid(leaf, inode_item, trans->transid); 140 btrfs_set_inode_transid(leaf, inode_item, trans->transid);
134 btrfs_set_inode_block_group(leaf, inode_item, 141 btrfs_set_inode_block_group(leaf, inode_item, offset);
135 block_group->key.objectid);
136 btrfs_mark_buffer_dirty(leaf); 142 btrfs_mark_buffer_dirty(leaf);
137 btrfs_release_path(root, path); 143 btrfs_release_path(root, path);
138 144
139 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 145 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
140 key.offset = block_group->key.objectid; 146 key.offset = offset;
141 key.type = 0; 147 key.type = 0;
142 148
143 ret = btrfs_insert_empty_item(trans, root, path, &key, 149 ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -157,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root,
157 return 0; 163 return 0;
158} 164}
159 165
166int create_free_space_inode(struct btrfs_root *root,
167 struct btrfs_trans_handle *trans,
168 struct btrfs_block_group_cache *block_group,
169 struct btrfs_path *path)
170{
171 int ret;
172 u64 ino;
173
174 ret = btrfs_find_free_objectid(root, &ino);
175 if (ret < 0)
176 return ret;
177
178 return __create_free_space_inode(root, trans, path, ino,
179 block_group->key.objectid);
180}
181
160int btrfs_truncate_free_space_cache(struct btrfs_root *root, 182int btrfs_truncate_free_space_cache(struct btrfs_root *root,
161 struct btrfs_trans_handle *trans, 183 struct btrfs_trans_handle *trans,
162 struct btrfs_path *path, 184 struct btrfs_path *path,
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
187 return ret; 209 return ret;
188 } 210 }
189 211
190 return btrfs_update_inode(trans, root, inode); 212 ret = btrfs_update_inode(trans, root, inode);
213 return ret;
191} 214}
192 215
193static int readahead_cache(struct inode *inode) 216static int readahead_cache(struct inode *inode)
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode)
209 return 0; 232 return 0;
210} 233}
211 234
212int load_free_space_cache(struct btrfs_fs_info *fs_info, 235int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
213 struct btrfs_block_group_cache *block_group) 236 struct btrfs_free_space_ctl *ctl,
237 struct btrfs_path *path, u64 offset)
214{ 238{
215 struct btrfs_root *root = fs_info->tree_root;
216 struct inode *inode;
217 struct btrfs_free_space_header *header; 239 struct btrfs_free_space_header *header;
218 struct extent_buffer *leaf; 240 struct extent_buffer *leaf;
219 struct page *page; 241 struct page *page;
220 struct btrfs_path *path;
221 u32 *checksums = NULL, *crc; 242 u32 *checksums = NULL, *crc;
222 char *disk_crcs = NULL; 243 char *disk_crcs = NULL;
223 struct btrfs_key key; 244 struct btrfs_key key;
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
225 u64 num_entries; 246 u64 num_entries;
226 u64 num_bitmaps; 247 u64 num_bitmaps;
227 u64 generation; 248 u64 generation;
228 u64 used = btrfs_block_group_used(&block_group->item);
229 u32 cur_crc = ~(u32)0; 249 u32 cur_crc = ~(u32)0;
230 pgoff_t index = 0; 250 pgoff_t index = 0;
231 unsigned long first_page_offset; 251 unsigned long first_page_offset;
232 int num_checksums; 252 int num_checksums;
233 int ret = 0; 253 int ret = 0, ret2;
234
235 /*
236 * If we're unmounting then just return, since this does a search on the
237 * normal root and not the commit root and we could deadlock.
238 */
239 smp_mb();
240 if (fs_info->closing)
241 return 0;
242
243 /*
244 * If this block group has been marked to be cleared for one reason or
245 * another then we can't trust the on disk cache, so just return.
246 */
247 spin_lock(&block_group->lock);
248 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
249 spin_unlock(&block_group->lock);
250 return 0;
251 }
252 spin_unlock(&block_group->lock);
253 254
254 INIT_LIST_HEAD(&bitmaps); 255 INIT_LIST_HEAD(&bitmaps);
255 256
256 path = btrfs_alloc_path();
257 if (!path)
258 return 0;
259
260 inode = lookup_free_space_inode(root, block_group, path);
261 if (IS_ERR(inode)) {
262 btrfs_free_path(path);
263 return 0;
264 }
265
266 /* Nothing in the space cache, goodbye */ 257 /* Nothing in the space cache, goodbye */
267 if (!i_size_read(inode)) { 258 if (!i_size_read(inode))
268 btrfs_free_path(path);
269 goto out; 259 goto out;
270 }
271 260
272 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 261 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
273 key.offset = block_group->key.objectid; 262 key.offset = offset;
274 key.type = 0; 263 key.type = 0;
275 264
276 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 265 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
277 if (ret) { 266 if (ret < 0)
278 btrfs_free_path(path); 267 goto out;
268 else if (ret > 0) {
269 btrfs_release_path(root, path);
270 ret = 0;
279 goto out; 271 goto out;
280 } 272 }
281 273
274 ret = -1;
275
282 leaf = path->nodes[0]; 276 leaf = path->nodes[0];
283 header = btrfs_item_ptr(leaf, path->slots[0], 277 header = btrfs_item_ptr(leaf, path->slots[0],
284 struct btrfs_free_space_header); 278 struct btrfs_free_space_header);
285 num_entries = btrfs_free_space_entries(leaf, header); 279 num_entries = btrfs_free_space_entries(leaf, header);
286 num_bitmaps = btrfs_free_space_bitmaps(leaf, header); 280 num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
287 generation = btrfs_free_space_generation(leaf, header); 281 generation = btrfs_free_space_generation(leaf, header);
288 btrfs_free_path(path); 282 btrfs_release_path(root, path);
289 283
290 if (BTRFS_I(inode)->generation != generation) { 284 if (BTRFS_I(inode)->generation != generation) {
291 printk(KERN_ERR "btrfs: free space inode generation (%llu) did" 285 printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
292 " not match free space cache generation (%llu) for " 286 " not match free space cache generation (%llu)\n",
293 "block group %llu\n",
294 (unsigned long long)BTRFS_I(inode)->generation, 287 (unsigned long long)BTRFS_I(inode)->generation,
295 (unsigned long long)generation, 288 (unsigned long long)generation);
296 (unsigned long long)block_group->key.objectid); 289 goto out;
297 goto free_cache;
298 } 290 }
299 291
300 if (!num_entries) 292 if (!num_entries)
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
311 goto out; 303 goto out;
312 304
313 ret = readahead_cache(inode); 305 ret = readahead_cache(inode);
314 if (ret) { 306 if (ret)
315 ret = 0;
316 goto out; 307 goto out;
317 }
318 308
319 while (1) { 309 while (1) {
320 struct btrfs_free_space_entry *entry; 310 struct btrfs_free_space_entry *entry;
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
333 } 323 }
334 324
335 page = grab_cache_page(inode->i_mapping, index); 325 page = grab_cache_page(inode->i_mapping, index);
336 if (!page) { 326 if (!page)
337 ret = 0;
338 goto free_cache; 327 goto free_cache;
339 }
340 328
341 if (!PageUptodate(page)) { 329 if (!PageUptodate(page)) {
342 btrfs_readpage(NULL, page); 330 btrfs_readpage(NULL, page);
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
345 unlock_page(page); 333 unlock_page(page);
346 page_cache_release(page); 334 page_cache_release(page);
347 printk(KERN_ERR "btrfs: error reading free " 335 printk(KERN_ERR "btrfs: error reading free "
348 "space cache: %llu\n", 336 "space cache\n");
349 (unsigned long long)
350 block_group->key.objectid);
351 goto free_cache; 337 goto free_cache;
352 } 338 }
353 } 339 }
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
360 gen = addr + (sizeof(u32) * num_checksums); 346 gen = addr + (sizeof(u32) * num_checksums);
361 if (*gen != BTRFS_I(inode)->generation) { 347 if (*gen != BTRFS_I(inode)->generation) {
362 printk(KERN_ERR "btrfs: space cache generation" 348 printk(KERN_ERR "btrfs: space cache generation"
363 " (%llu) does not match inode (%llu) " 349 " (%llu) does not match inode (%llu)\n",
364 "for block group %llu\n",
365 (unsigned long long)*gen, 350 (unsigned long long)*gen,
366 (unsigned long long) 351 (unsigned long long)
367 BTRFS_I(inode)->generation, 352 BTRFS_I(inode)->generation);
368 (unsigned long long)
369 block_group->key.objectid);
370 kunmap(page); 353 kunmap(page);
371 unlock_page(page); 354 unlock_page(page);
372 page_cache_release(page); 355 page_cache_release(page);
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
382 PAGE_CACHE_SIZE - start_offset); 365 PAGE_CACHE_SIZE - start_offset);
383 btrfs_csum_final(cur_crc, (char *)&cur_crc); 366 btrfs_csum_final(cur_crc, (char *)&cur_crc);
384 if (cur_crc != *crc) { 367 if (cur_crc != *crc) {
385 printk(KERN_ERR "btrfs: crc mismatch for page %lu in " 368 printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
386 "block group %llu\n", index, 369 index);
387 (unsigned long long)block_group->key.objectid);
388 kunmap(page); 370 kunmap(page);
389 unlock_page(page); 371 unlock_page(page);
390 page_cache_release(page); 372 page_cache_release(page);
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
417 } 399 }
418 400
419 if (entry->type == BTRFS_FREE_SPACE_EXTENT) { 401 if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
420 spin_lock(&block_group->tree_lock); 402 spin_lock(&ctl->tree_lock);
421 ret = link_free_space(block_group, e); 403 ret = link_free_space(ctl, e);
422 spin_unlock(&block_group->tree_lock); 404 spin_unlock(&ctl->tree_lock);
423 BUG_ON(ret); 405 BUG_ON(ret);
424 } else { 406 } else {
425 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 407 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
431 page_cache_release(page); 413 page_cache_release(page);
432 goto free_cache; 414 goto free_cache;
433 } 415 }
434 spin_lock(&block_group->tree_lock); 416 spin_lock(&ctl->tree_lock);
435 ret = link_free_space(block_group, e); 417 ret2 = link_free_space(ctl, e);
436 block_group->total_bitmaps++; 418 ctl->total_bitmaps++;
437 recalculate_thresholds(block_group); 419 ctl->op->recalc_thresholds(ctl);
438 spin_unlock(&block_group->tree_lock); 420 spin_unlock(&ctl->tree_lock);
439 list_add_tail(&e->list, &bitmaps); 421 list_add_tail(&e->list, &bitmaps);
440 } 422 }
441 423
@@ -471,41 +453,97 @@ next:
471 index++; 453 index++;
472 } 454 }
473 455
474 spin_lock(&block_group->tree_lock);
475 if (block_group->free_space != (block_group->key.offset - used -
476 block_group->bytes_super)) {
477 spin_unlock(&block_group->tree_lock);
478 printk(KERN_ERR "block group %llu has an wrong amount of free "
479 "space\n", block_group->key.objectid);
480 ret = 0;
481 goto free_cache;
482 }
483 spin_unlock(&block_group->tree_lock);
484
485 ret = 1; 456 ret = 1;
486out: 457out:
487 kfree(checksums); 458 kfree(checksums);
488 kfree(disk_crcs); 459 kfree(disk_crcs);
489 iput(inode);
490 return ret; 460 return ret;
491
492free_cache: 461free_cache:
493 /* This cache is bogus, make sure it gets cleared */ 462 __btrfs_remove_free_space_cache(ctl);
463 goto out;
464}
465
466int load_free_space_cache(struct btrfs_fs_info *fs_info,
467 struct btrfs_block_group_cache *block_group)
468{
469 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
470 struct btrfs_root *root = fs_info->tree_root;
471 struct inode *inode;
472 struct btrfs_path *path;
473 int ret;
474 bool matched;
475 u64 used = btrfs_block_group_used(&block_group->item);
476
477 /*
478 * If we're unmounting then just return, since this does a search on the
479 * normal root and not the commit root and we could deadlock.
480 */
481 smp_mb();
482 if (fs_info->closing)
483 return 0;
484
485 /*
486 * If this block group has been marked to be cleared for one reason or
487 * another then we can't trust the on disk cache, so just return.
488 */
494 spin_lock(&block_group->lock); 489 spin_lock(&block_group->lock);
495 block_group->disk_cache_state = BTRFS_DC_CLEAR; 490 if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
491 spin_unlock(&block_group->lock);
492 return 0;
493 }
496 spin_unlock(&block_group->lock); 494 spin_unlock(&block_group->lock);
497 btrfs_remove_free_space_cache(block_group); 495
498 goto out; 496 path = btrfs_alloc_path();
497 if (!path)
498 return 0;
499
500 inode = lookup_free_space_inode(root, block_group, path);
501 if (IS_ERR(inode)) {
502 btrfs_free_path(path);
503 return 0;
504 }
505
506 ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
507 path, block_group->key.objectid);
508 btrfs_free_path(path);
509 if (ret <= 0)
510 goto out;
511
512 spin_lock(&ctl->tree_lock);
513 matched = (ctl->free_space == (block_group->key.offset - used -
514 block_group->bytes_super));
515 spin_unlock(&ctl->tree_lock);
516
517 if (!matched) {
518 __btrfs_remove_free_space_cache(ctl);
 519 printk(KERN_ERR "block group %llu has a wrong amount of free "
520 "space\n", block_group->key.objectid);
521 ret = -1;
522 }
523out:
524 if (ret < 0) {
525 /* This cache is bogus, make sure it gets cleared */
526 spin_lock(&block_group->lock);
527 block_group->disk_cache_state = BTRFS_DC_CLEAR;
528 spin_unlock(&block_group->lock);
529 ret = 0;
530
531 printk(KERN_ERR "btrfs: failed to load free space cache "
532 "for block group %llu\n", block_group->key.objectid);
533 }
534
535 iput(inode);
536 return ret;
499} 537}
500 538
501int btrfs_write_out_cache(struct btrfs_root *root, 539int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
502 struct btrfs_trans_handle *trans, 540 struct btrfs_free_space_ctl *ctl,
503 struct btrfs_block_group_cache *block_group, 541 struct btrfs_block_group_cache *block_group,
504 struct btrfs_path *path) 542 struct btrfs_trans_handle *trans,
543 struct btrfs_path *path, u64 offset)
505{ 544{
506 struct btrfs_free_space_header *header; 545 struct btrfs_free_space_header *header;
507 struct extent_buffer *leaf; 546 struct extent_buffer *leaf;
508 struct inode *inode;
509 struct rb_node *node; 547 struct rb_node *node;
510 struct list_head *pos, *n; 548 struct list_head *pos, *n;
511 struct page **pages; 549 struct page **pages;
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
522 int index = 0, num_pages = 0; 560 int index = 0, num_pages = 0;
523 int entries = 0; 561 int entries = 0;
524 int bitmaps = 0; 562 int bitmaps = 0;
525 int ret = 0; 563 int ret = -1;
526 bool next_page = false; 564 bool next_page = false;
527 bool out_of_space = false; 565 bool out_of_space = false;
528 566
529 root = root->fs_info->tree_root;
530
531 INIT_LIST_HEAD(&bitmap_list); 567 INIT_LIST_HEAD(&bitmap_list);
532 568
533 spin_lock(&block_group->lock); 569 node = rb_first(&ctl->free_space_offset);
534 if (block_group->disk_cache_state < BTRFS_DC_SETUP) { 570 if (!node)
535 spin_unlock(&block_group->lock);
536 return 0;
537 }
538 spin_unlock(&block_group->lock);
539
540 inode = lookup_free_space_inode(root, block_group, path);
541 if (IS_ERR(inode))
542 return 0; 571 return 0;
543 572
544 if (!i_size_read(inode)) { 573 if (!i_size_read(inode))
545 iput(inode); 574 return -1;
546 return 0;
547 }
548
549 node = rb_first(&block_group->free_space_offset);
550 if (!node) {
551 iput(inode);
552 return 0;
553 }
554 575
555 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 576 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
556 PAGE_CACHE_SHIFT; 577 PAGE_CACHE_SHIFT;
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
560 581
561 /* We need a checksum per page. */ 582 /* We need a checksum per page. */
562 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); 583 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
563 if (!crc) { 584 if (!crc)
564 iput(inode); 585 return -1;
565 return 0;
566 }
567 586
568 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); 587 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
569 if (!pages) { 588 if (!pages) {
570 kfree(crc); 589 kfree(crc);
571 iput(inode); 590 return -1;
572 return 0;
573 } 591 }
574 592
575 /* Since the first page has all of our checksums and our generation we 593 /* Since the first page has all of our checksums and our generation we
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
579 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); 597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
580 598
581 /* Get the cluster for this block_group if it exists */ 599 /* Get the cluster for this block_group if it exists */
582 if (!list_empty(&block_group->cluster_list)) 600 if (block_group && !list_empty(&block_group->cluster_list))
583 cluster = list_entry(block_group->cluster_list.next, 601 cluster = list_entry(block_group->cluster_list.next,
584 struct btrfs_free_cluster, 602 struct btrfs_free_cluster,
585 block_group_list); 603 block_group_list);
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
621 * When searching for pinned extents, we need to start at our start 639 * When searching for pinned extents, we need to start at our start
622 * offset. 640 * offset.
623 */ 641 */
624 start = block_group->key.objectid; 642 if (block_group)
643 start = block_group->key.objectid;
625 644
626 /* Write out the extent entries */ 645 /* Write out the extent entries */
627 do { 646 do {
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
679 * We want to add any pinned extents to our free space cache 698 * We want to add any pinned extents to our free space cache
680 * so we don't leak the space 699 * so we don't leak the space
681 */ 700 */
682 while (!next_page && (start < block_group->key.objectid + 701 while (block_group && !next_page &&
683 block_group->key.offset)) { 702 (start < block_group->key.objectid +
703 block_group->key.offset)) {
684 ret = find_first_extent_bit(unpin, start, &start, &end, 704 ret = find_first_extent_bit(unpin, start, &start, &end,
685 EXTENT_DIRTY); 705 EXTENT_DIRTY);
686 if (ret) { 706 if (ret) {
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
798 filemap_write_and_wait(inode->i_mapping); 818 filemap_write_and_wait(inode->i_mapping);
799 819
800 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 820 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
801 key.offset = block_group->key.objectid; 821 key.offset = offset;
802 key.type = 0; 822 key.type = 0;
803 823
804 ret = btrfs_search_slot(trans, root, &key, path, 1, 1); 824 ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
805 if (ret < 0) { 825 if (ret < 0) {
806 ret = 0; 826 ret = -1;
807 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, 827 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
808 EXTENT_DIRTY | EXTENT_DELALLOC | 828 EXTENT_DIRTY | EXTENT_DELALLOC |
809 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); 829 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -816,8 +836,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
816 path->slots[0]--; 836 path->slots[0]--;
817 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 837 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
818 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || 838 if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
819 found_key.offset != block_group->key.objectid) { 839 found_key.offset != offset) {
820 ret = 0; 840 ret = -1;
821 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, 841 clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
822 EXTENT_DIRTY | EXTENT_DELALLOC | 842 EXTENT_DIRTY | EXTENT_DELALLOC |
823 EXTENT_DO_ACCOUNTING, 0, 0, NULL, 843 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
@@ -837,44 +857,78 @@ int btrfs_write_out_cache(struct btrfs_root *root,
837 ret = 1; 857 ret = 1;
838 858
839out_free: 859out_free:
840 if (ret == 0) { 860 if (ret != 1) {
841 invalidate_inode_pages2_range(inode->i_mapping, 0, index); 861 invalidate_inode_pages2_range(inode->i_mapping, 0, index);
842 spin_lock(&block_group->lock);
843 block_group->disk_cache_state = BTRFS_DC_ERROR;
844 spin_unlock(&block_group->lock);
845 BTRFS_I(inode)->generation = 0; 862 BTRFS_I(inode)->generation = 0;
846 } 863 }
847 kfree(checksums); 864 kfree(checksums);
848 kfree(pages); 865 kfree(pages);
849 btrfs_update_inode(trans, root, inode); 866 btrfs_update_inode(trans, root, inode);
867 return ret;
868}
869
870int btrfs_write_out_cache(struct btrfs_root *root,
871 struct btrfs_trans_handle *trans,
872 struct btrfs_block_group_cache *block_group,
873 struct btrfs_path *path)
874{
875 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
876 struct inode *inode;
877 int ret = 0;
878
879 root = root->fs_info->tree_root;
880
881 spin_lock(&block_group->lock);
882 if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
883 spin_unlock(&block_group->lock);
884 return 0;
885 }
886 spin_unlock(&block_group->lock);
887
888 inode = lookup_free_space_inode(root, block_group, path);
889 if (IS_ERR(inode))
890 return 0;
891
892 ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
893 path, block_group->key.objectid);
894 if (ret < 0) {
895 spin_lock(&block_group->lock);
896 block_group->disk_cache_state = BTRFS_DC_ERROR;
897 spin_unlock(&block_group->lock);
898 ret = 0;
899
 900 printk(KERN_ERR "btrfs: failed to write free space cache "
901 "for block group %llu\n", block_group->key.objectid);
902 }
903
850 iput(inode); 904 iput(inode);
851 return ret; 905 return ret;
852} 906}
853 907
854static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, 908static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
855 u64 offset) 909 u64 offset)
856{ 910{
857 BUG_ON(offset < bitmap_start); 911 BUG_ON(offset < bitmap_start);
858 offset -= bitmap_start; 912 offset -= bitmap_start;
859 return (unsigned long)(div64_u64(offset, sectorsize)); 913 return (unsigned long)(div_u64(offset, unit));
860} 914}
861 915
862static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) 916static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
863{ 917{
864 return (unsigned long)(div64_u64(bytes, sectorsize)); 918 return (unsigned long)(div_u64(bytes, unit));
865} 919}
866 920
867static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, 921static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
868 u64 offset) 922 u64 offset)
869{ 923{
870 u64 bitmap_start; 924 u64 bitmap_start;
871 u64 bytes_per_bitmap; 925 u64 bytes_per_bitmap;
872 926
873 bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; 927 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
874 bitmap_start = offset - block_group->key.objectid; 928 bitmap_start = offset - ctl->start;
875 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); 929 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
876 bitmap_start *= bytes_per_bitmap; 930 bitmap_start *= bytes_per_bitmap;
877 bitmap_start += block_group->key.objectid; 931 bitmap_start += ctl->start;
878 932
879 return bitmap_start; 933 return bitmap_start;
880} 934}
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
932 * offset. 986 * offset.
933 */ 987 */
934static struct btrfs_free_space * 988static struct btrfs_free_space *
935tree_search_offset(struct btrfs_block_group_cache *block_group, 989tree_search_offset(struct btrfs_free_space_ctl *ctl,
936 u64 offset, int bitmap_only, int fuzzy) 990 u64 offset, int bitmap_only, int fuzzy)
937{ 991{
938 struct rb_node *n = block_group->free_space_offset.rb_node; 992 struct rb_node *n = ctl->free_space_offset.rb_node;
939 struct btrfs_free_space *entry, *prev = NULL; 993 struct btrfs_free_space *entry, *prev = NULL;
940 994
941 /* find entry that is closest to the 'offset' */ 995 /* find entry that is closest to the 'offset' */
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
1031 break; 1085 break;
1032 } 1086 }
1033 } 1087 }
1034 if (entry->offset + BITS_PER_BITMAP * 1088 if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
1035 block_group->sectorsize > offset)
1036 return entry; 1089 return entry;
1037 } else if (entry->offset + entry->bytes > offset) 1090 } else if (entry->offset + entry->bytes > offset)
1038 return entry; 1091 return entry;
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
1043 while (1) { 1096 while (1) {
1044 if (entry->bitmap) { 1097 if (entry->bitmap) {
1045 if (entry->offset + BITS_PER_BITMAP * 1098 if (entry->offset + BITS_PER_BITMAP *
1046 block_group->sectorsize > offset) 1099 ctl->unit > offset)
1047 break; 1100 break;
1048 } else { 1101 } else {
1049 if (entry->offset + entry->bytes > offset) 1102 if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
1059} 1112}
1060 1113
1061static inline void 1114static inline void
1062__unlink_free_space(struct btrfs_block_group_cache *block_group, 1115__unlink_free_space(struct btrfs_free_space_ctl *ctl,
1063 struct btrfs_free_space *info) 1116 struct btrfs_free_space *info)
1064{ 1117{
1065 rb_erase(&info->offset_index, &block_group->free_space_offset); 1118 rb_erase(&info->offset_index, &ctl->free_space_offset);
1066 block_group->free_extents--; 1119 ctl->free_extents--;
1067} 1120}
1068 1121
1069static void unlink_free_space(struct btrfs_block_group_cache *block_group, 1122static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1070 struct btrfs_free_space *info) 1123 struct btrfs_free_space *info)
1071{ 1124{
1072 __unlink_free_space(block_group, info); 1125 __unlink_free_space(ctl, info);
1073 block_group->free_space -= info->bytes; 1126 ctl->free_space -= info->bytes;
1074} 1127}
1075 1128
1076static int link_free_space(struct btrfs_block_group_cache *block_group, 1129static int link_free_space(struct btrfs_free_space_ctl *ctl,
1077 struct btrfs_free_space *info) 1130 struct btrfs_free_space *info)
1078{ 1131{
1079 int ret = 0; 1132 int ret = 0;
1080 1133
1081 BUG_ON(!info->bitmap && !info->bytes); 1134 BUG_ON(!info->bitmap && !info->bytes);
1082 ret = tree_insert_offset(&block_group->free_space_offset, info->offset, 1135 ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1083 &info->offset_index, (info->bitmap != NULL)); 1136 &info->offset_index, (info->bitmap != NULL));
1084 if (ret) 1137 if (ret)
1085 return ret; 1138 return ret;
1086 1139
1087 block_group->free_space += info->bytes; 1140 ctl->free_space += info->bytes;
1088 block_group->free_extents++; 1141 ctl->free_extents++;
1089 return ret; 1142 return ret;
1090} 1143}
1091 1144
1092static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) 1145static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
1093{ 1146{
1147 struct btrfs_block_group_cache *block_group = ctl->private;
1094 u64 max_bytes; 1148 u64 max_bytes;
1095 u64 bitmap_bytes; 1149 u64 bitmap_bytes;
1096 u64 extent_bytes; 1150 u64 extent_bytes;
1097 u64 size = block_group->key.offset; 1151 u64 size = block_group->key.offset;
1152 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
1153 int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
1154
1155 BUG_ON(ctl->total_bitmaps > max_bitmaps);
1098 1156
1099 /* 1157 /*
1100 * The goal is to keep the total amount of memory used per 1gb of space 1158 * The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
1112 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as 1170 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
1113 * we add more bitmaps. 1171 * we add more bitmaps.
1114 */ 1172 */
1115 bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; 1173 bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
1116 1174
1117 if (bitmap_bytes >= max_bytes) { 1175 if (bitmap_bytes >= max_bytes) {
1118 block_group->extents_thresh = 0; 1176 ctl->extents_thresh = 0;
1119 return; 1177 return;
1120 } 1178 }
1121 1179
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
1126 extent_bytes = max_bytes - bitmap_bytes; 1184 extent_bytes = max_bytes - bitmap_bytes;
1127 extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); 1185 extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
1128 1186
1129 block_group->extents_thresh = 1187 ctl->extents_thresh =
1130 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); 1188 div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
1131} 1189}
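
To put rough numbers on the threshold logic (illustrative only, since the max_bytes calculation sits in the part of the function not shown in this hunk): with 4 KiB pages, if max_bytes comes out to 32 KiB for this group and three bitmaps are already in use, then bitmap_bytes = (3 + 1) * 4 KiB = 16 KiB, extent_bytes = min(32 KiB - 16 KiB, 32 KiB / 2) = 16 KiB, and extents_thresh ends up around 16 KiB / sizeof(struct btrfs_free_space), roughly 256 entries on a 64-bit build.
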
1132 1190
1133static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, 1191static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1134 struct btrfs_free_space *info, u64 offset, 1192 struct btrfs_free_space *info, u64 offset,
1135 u64 bytes) 1193 u64 bytes)
1136{ 1194{
1137 unsigned long start, end; 1195 unsigned long start, count;
1138 unsigned long i;
1139 1196
1140 start = offset_to_bit(info->offset, block_group->sectorsize, offset); 1197 start = offset_to_bit(info->offset, ctl->unit, offset);
1141 end = start + bytes_to_bits(bytes, block_group->sectorsize); 1198 count = bytes_to_bits(bytes, ctl->unit);
1142 BUG_ON(end > BITS_PER_BITMAP); 1199 BUG_ON(start + count > BITS_PER_BITMAP);
1143 1200
1144 for (i = start; i < end; i++) 1201 bitmap_clear(info->bitmap, start, count);
1145 clear_bit(i, info->bitmap);
1146 1202
1147 info->bytes -= bytes; 1203 info->bytes -= bytes;
1148 block_group->free_space -= bytes; 1204 ctl->free_space -= bytes;
1149} 1205}
1150 1206
1151static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, 1207static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1152 struct btrfs_free_space *info, u64 offset, 1208 struct btrfs_free_space *info, u64 offset,
1153 u64 bytes) 1209 u64 bytes)
1154{ 1210{
1155 unsigned long start, end; 1211 unsigned long start, count;
1156 unsigned long i;
1157 1212
1158 start = offset_to_bit(info->offset, block_group->sectorsize, offset); 1213 start = offset_to_bit(info->offset, ctl->unit, offset);
1159 end = start + bytes_to_bits(bytes, block_group->sectorsize); 1214 count = bytes_to_bits(bytes, ctl->unit);
1160 BUG_ON(end > BITS_PER_BITMAP); 1215 BUG_ON(start + count > BITS_PER_BITMAP);
1161 1216
1162 for (i = start; i < end; i++) 1217 bitmap_set(info->bitmap, start, count);
1163 set_bit(i, info->bitmap);
1164 1218
1165 info->bytes += bytes; 1219 info->bytes += bytes;
1166 block_group->free_space += bytes; 1220 ctl->free_space += bytes;
1167} 1221}
1168 1222
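
bitmap_clear_bits() and bitmap_set_bits() now hand the whole run to bitmap_clear()/bitmap_set() from <linux/bitmap.h> instead of looping one bit at a time. The resulting bit pattern is identical; the batch helpers are merely non-atomic, which looks safe here since every caller in this file updates the bitmap under ctl->tree_lock. A self-contained sketch of what the removed loop and bitmap_set() both compute, mirroring the kernel's bit numbering within each long:

static void set_range_open_coded(unsigned long *map, unsigned int start,
				 unsigned int count)
{
	unsigned int i;

	/* same end state as bitmap_set(map, start, count) */
	for (i = start; i < start + count; i++)
		map[i / (8 * sizeof(long))] |= 1UL << (i % (8 * sizeof(long)));
}
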
1169static int search_bitmap(struct btrfs_block_group_cache *block_group, 1223static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1170 struct btrfs_free_space *bitmap_info, u64 *offset, 1224 struct btrfs_free_space *bitmap_info, u64 *offset,
1171 u64 *bytes) 1225 u64 *bytes)
1172{ 1226{
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
1174 unsigned long bits, i; 1228 unsigned long bits, i;
1175 unsigned long next_zero; 1229 unsigned long next_zero;
1176 1230
1177 i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, 1231 i = offset_to_bit(bitmap_info->offset, ctl->unit,
1178 max_t(u64, *offset, bitmap_info->offset)); 1232 max_t(u64, *offset, bitmap_info->offset));
1179 bits = bytes_to_bits(*bytes, block_group->sectorsize); 1233 bits = bytes_to_bits(*bytes, ctl->unit);
1180 1234
1181 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); 1235 for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
1182 i < BITS_PER_BITMAP; 1236 i < BITS_PER_BITMAP;
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
1191 } 1245 }
1192 1246
1193 if (found_bits) { 1247 if (found_bits) {
1194 *offset = (u64)(i * block_group->sectorsize) + 1248 *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1195 bitmap_info->offset; 1249 *bytes = (u64)(found_bits) * ctl->unit;
1196 *bytes = (u64)(found_bits) * block_group->sectorsize;
1197 return 0; 1250 return 0;
1198 } 1251 }
1199 1252
1200 return -1; 1253 return -1;
1201} 1254}
1202 1255
1203static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache 1256static struct btrfs_free_space *
1204 *block_group, u64 *offset, 1257find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
1205 u64 *bytes, int debug)
1206{ 1258{
1207 struct btrfs_free_space *entry; 1259 struct btrfs_free_space *entry;
1208 struct rb_node *node; 1260 struct rb_node *node;
1209 int ret; 1261 int ret;
1210 1262
1211 if (!block_group->free_space_offset.rb_node) 1263 if (!ctl->free_space_offset.rb_node)
1212 return NULL; 1264 return NULL;
1213 1265
1214 entry = tree_search_offset(block_group, 1266 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1215 offset_to_bitmap(block_group, *offset),
1216 0, 1);
1217 if (!entry) 1267 if (!entry)
1218 return NULL; 1268 return NULL;
1219 1269
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
1223 continue; 1273 continue;
1224 1274
1225 if (entry->bitmap) { 1275 if (entry->bitmap) {
1226 ret = search_bitmap(block_group, entry, offset, bytes); 1276 ret = search_bitmap(ctl, entry, offset, bytes);
1227 if (!ret) 1277 if (!ret)
1228 return entry; 1278 return entry;
1229 continue; 1279 continue;
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
1237 return NULL; 1287 return NULL;
1238} 1288}
1239 1289
1240static void add_new_bitmap(struct btrfs_block_group_cache *block_group, 1290static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1241 struct btrfs_free_space *info, u64 offset) 1291 struct btrfs_free_space *info, u64 offset)
1242{ 1292{
1243 u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; 1293 info->offset = offset_to_bitmap(ctl, offset);
1244 int max_bitmaps = (int)div64_u64(block_group->key.offset +
1245 bytes_per_bg - 1, bytes_per_bg);
1246 BUG_ON(block_group->total_bitmaps >= max_bitmaps);
1247
1248 info->offset = offset_to_bitmap(block_group, offset);
1249 info->bytes = 0; 1294 info->bytes = 0;
1250 link_free_space(block_group, info); 1295 link_free_space(ctl, info);
1251 block_group->total_bitmaps++; 1296 ctl->total_bitmaps++;
1252 1297
1253 recalculate_thresholds(block_group); 1298 ctl->op->recalc_thresholds(ctl);
1254} 1299}
1255 1300
1256static void free_bitmap(struct btrfs_block_group_cache *block_group, 1301static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1257 struct btrfs_free_space *bitmap_info) 1302 struct btrfs_free_space *bitmap_info)
1258{ 1303{
1259 unlink_free_space(block_group, bitmap_info); 1304 unlink_free_space(ctl, bitmap_info);
1260 kfree(bitmap_info->bitmap); 1305 kfree(bitmap_info->bitmap);
1261 kmem_cache_free(btrfs_free_space_cachep, bitmap_info); 1306 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1262 block_group->total_bitmaps--; 1307 ctl->total_bitmaps--;
1263 recalculate_thresholds(block_group); 1308 ctl->op->recalc_thresholds(ctl);
1264} 1309}
1265 1310
1266static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, 1311static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1267 struct btrfs_free_space *bitmap_info, 1312 struct btrfs_free_space *bitmap_info,
1268 u64 *offset, u64 *bytes) 1313 u64 *offset, u64 *bytes)
1269{ 1314{
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
1272 int ret; 1317 int ret;
1273 1318
1274again: 1319again:
1275 end = bitmap_info->offset + 1320 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1276 (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
1277 1321
1278 /* 1322 /*
1279 * XXX - this can go away after a few releases. 1323 * XXX - this can go away after a few releases.
@@ -1288,24 +1332,22 @@ again:
1288 search_start = *offset; 1332 search_start = *offset;
1289 search_bytes = *bytes; 1333 search_bytes = *bytes;
1290 search_bytes = min(search_bytes, end - search_start + 1); 1334 search_bytes = min(search_bytes, end - search_start + 1);
1291 ret = search_bitmap(block_group, bitmap_info, &search_start, 1335 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1292 &search_bytes);
1293 BUG_ON(ret < 0 || search_start != *offset); 1336 BUG_ON(ret < 0 || search_start != *offset);
1294 1337
1295 if (*offset > bitmap_info->offset && *offset + *bytes > end) { 1338 if (*offset > bitmap_info->offset && *offset + *bytes > end) {
1296 bitmap_clear_bits(block_group, bitmap_info, *offset, 1339 bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
1297 end - *offset + 1);
1298 *bytes -= end - *offset + 1; 1340 *bytes -= end - *offset + 1;
1299 *offset = end + 1; 1341 *offset = end + 1;
1300 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { 1342 } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
1301 bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); 1343 bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
1302 *bytes = 0; 1344 *bytes = 0;
1303 } 1345 }
1304 1346
1305 if (*bytes) { 1347 if (*bytes) {
1306 struct rb_node *next = rb_next(&bitmap_info->offset_index); 1348 struct rb_node *next = rb_next(&bitmap_info->offset_index);
1307 if (!bitmap_info->bytes) 1349 if (!bitmap_info->bytes)
1308 free_bitmap(block_group, bitmap_info); 1350 free_bitmap(ctl, bitmap_info);
1309 1351
1310 /* 1352 /*
1311 * no entry after this bitmap, but we still have bytes to 1353 * no entry after this bitmap, but we still have bytes to
@@ -1332,31 +1374,28 @@ again:
1332 */ 1374 */
1333 search_start = *offset; 1375 search_start = *offset;
1334 search_bytes = *bytes; 1376 search_bytes = *bytes;
1335 ret = search_bitmap(block_group, bitmap_info, &search_start, 1377 ret = search_bitmap(ctl, bitmap_info, &search_start,
1336 &search_bytes); 1378 &search_bytes);
1337 if (ret < 0 || search_start != *offset) 1379 if (ret < 0 || search_start != *offset)
1338 return -EAGAIN; 1380 return -EAGAIN;
1339 1381
1340 goto again; 1382 goto again;
1341 } else if (!bitmap_info->bytes) 1383 } else if (!bitmap_info->bytes)
1342 free_bitmap(block_group, bitmap_info); 1384 free_bitmap(ctl, bitmap_info);
1343 1385
1344 return 0; 1386 return 0;
1345} 1387}
1346 1388
1347static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, 1389static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1348 struct btrfs_free_space *info) 1390 struct btrfs_free_space *info)
1349{ 1391{
1350 struct btrfs_free_space *bitmap_info; 1392 struct btrfs_block_group_cache *block_group = ctl->private;
1351 int added = 0;
1352 u64 bytes, offset, end;
1353 int ret;
1354 1393
1355 /* 1394 /*
1356 * If we are below the extents threshold then we can add this as an 1395 * If we are below the extents threshold then we can add this as an
1357 * extent, and don't have to deal with the bitmap 1396 * extent, and don't have to deal with the bitmap
1358 */ 1397 */
1359 if (block_group->free_extents < block_group->extents_thresh) { 1398 if (ctl->free_extents < ctl->extents_thresh) {
1360 /* 1399 /*
1361 * If this block group has some small extents we don't want to 1400 * If this block group has some small extents we don't want to
1362 * use up all of our free slots in the cache with them, we want 1401 * use up all of our free slots in the cache with them, we want
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
1365 * the overhead of a bitmap if we don't have to. 1404 * the overhead of a bitmap if we don't have to.
1366 */ 1405 */
1367 if (info->bytes <= block_group->sectorsize * 4) { 1406 if (info->bytes <= block_group->sectorsize * 4) {
1368 if (block_group->free_extents * 2 <= 1407 if (ctl->free_extents * 2 <= ctl->extents_thresh)
1369 block_group->extents_thresh) 1408 return false;
1370 return 0;
1371 } else { 1409 } else {
1372 return 0; 1410 return false;
1373 } 1411 }
1374 } 1412 }
1375 1413
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
1379 */ 1417 */
1380 if (BITS_PER_BITMAP * block_group->sectorsize > 1418 if (BITS_PER_BITMAP * block_group->sectorsize >
1381 block_group->key.offset) 1419 block_group->key.offset)
1382 return 0; 1420 return false;
1421
1422 return true;
1423}
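
Concretely (numbers illustrative): with a 4 KiB sectorsize and an extents_thresh of 256, a 12 KiB free chunk (at most four sectors) stays a plain extent entry only while the cache holds no more than 128 extent entries; past that point such small chunks are pushed into bitmaps to conserve entry slots. Larger chunks remain extent entries until the full 256-entry threshold is crossed, and a block group smaller than one bitmap's coverage never uses bitmaps at all.
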
1424
1425static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1426 struct btrfs_free_space *info)
1427{
1428 struct btrfs_free_space *bitmap_info;
1429 int added = 0;
1430 u64 bytes, offset, end;
1431 int ret;
1383 1432
1384 bytes = info->bytes; 1433 bytes = info->bytes;
1385 offset = info->offset; 1434 offset = info->offset;
1386 1435
1436 if (!ctl->op->use_bitmap(ctl, info))
1437 return 0;
1438
1387again: 1439again:
1388 bitmap_info = tree_search_offset(block_group, 1440 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1389 offset_to_bitmap(block_group, offset),
1390 1, 0); 1441 1, 0);
1391 if (!bitmap_info) { 1442 if (!bitmap_info) {
1392 BUG_ON(added); 1443 BUG_ON(added);
1393 goto new_bitmap; 1444 goto new_bitmap;
1394 } 1445 }
1395 1446
1396 end = bitmap_info->offset + 1447 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1397 (u64)(BITS_PER_BITMAP * block_group->sectorsize);
1398 1448
1399 if (offset >= bitmap_info->offset && offset + bytes > end) { 1449 if (offset >= bitmap_info->offset && offset + bytes > end) {
1400 bitmap_set_bits(block_group, bitmap_info, offset, 1450 bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
1401 end - offset);
1402 bytes -= end - offset; 1451 bytes -= end - offset;
1403 offset = end; 1452 offset = end;
1404 added = 0; 1453 added = 0;
1405 } else if (offset >= bitmap_info->offset && offset + bytes <= end) { 1454 } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
1406 bitmap_set_bits(block_group, bitmap_info, offset, bytes); 1455 bitmap_set_bits(ctl, bitmap_info, offset, bytes);
1407 bytes = 0; 1456 bytes = 0;
1408 } else { 1457 } else {
1409 BUG(); 1458 BUG();
@@ -1417,19 +1466,19 @@ again:
1417 1466
1418new_bitmap: 1467new_bitmap:
1419 if (info && info->bitmap) { 1468 if (info && info->bitmap) {
1420 add_new_bitmap(block_group, info, offset); 1469 add_new_bitmap(ctl, info, offset);
1421 added = 1; 1470 added = 1;
1422 info = NULL; 1471 info = NULL;
1423 goto again; 1472 goto again;
1424 } else { 1473 } else {
1425 spin_unlock(&block_group->tree_lock); 1474 spin_unlock(&ctl->tree_lock);
1426 1475
1427 /* no pre-allocated info, allocate a new one */ 1476 /* no pre-allocated info, allocate a new one */
1428 if (!info) { 1477 if (!info) {
1429 info = kmem_cache_zalloc(btrfs_free_space_cachep, 1478 info = kmem_cache_zalloc(btrfs_free_space_cachep,
1430 GFP_NOFS); 1479 GFP_NOFS);
1431 if (!info) { 1480 if (!info) {
1432 spin_lock(&block_group->tree_lock); 1481 spin_lock(&ctl->tree_lock);
1433 ret = -ENOMEM; 1482 ret = -ENOMEM;
1434 goto out; 1483 goto out;
1435 } 1484 }
@@ -1437,7 +1486,7 @@ new_bitmap:
1437 1486
1438 /* allocate the bitmap */ 1487 /* allocate the bitmap */
1439 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 1488 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1440 spin_lock(&block_group->tree_lock); 1489 spin_lock(&ctl->tree_lock);
1441 if (!info->bitmap) { 1490 if (!info->bitmap) {
1442 ret = -ENOMEM; 1491 ret = -ENOMEM;
1443 goto out; 1492 goto out;
@@ -1455,7 +1504,7 @@ out:
1455 return ret; 1504 return ret;
1456} 1505}
1457 1506
1458bool try_merge_free_space(struct btrfs_block_group_cache *block_group, 1507bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1459 struct btrfs_free_space *info, bool update_stat) 1508 struct btrfs_free_space *info, bool update_stat)
1460{ 1509{
1461 struct btrfs_free_space *left_info; 1510 struct btrfs_free_space *left_info;
@@ -1469,18 +1518,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1469 * are adding, if there is remove that struct and add a new one to 1518 * are adding, if there is remove that struct and add a new one to
1470 * cover the entire range 1519 * cover the entire range
1471 */ 1520 */
1472 right_info = tree_search_offset(block_group, offset + bytes, 0, 0); 1521 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1473 if (right_info && rb_prev(&right_info->offset_index)) 1522 if (right_info && rb_prev(&right_info->offset_index))
1474 left_info = rb_entry(rb_prev(&right_info->offset_index), 1523 left_info = rb_entry(rb_prev(&right_info->offset_index),
1475 struct btrfs_free_space, offset_index); 1524 struct btrfs_free_space, offset_index);
1476 else 1525 else
1477 left_info = tree_search_offset(block_group, offset - 1, 0, 0); 1526 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1478 1527
1479 if (right_info && !right_info->bitmap) { 1528 if (right_info && !right_info->bitmap) {
1480 if (update_stat) 1529 if (update_stat)
1481 unlink_free_space(block_group, right_info); 1530 unlink_free_space(ctl, right_info);
1482 else 1531 else
1483 __unlink_free_space(block_group, right_info); 1532 __unlink_free_space(ctl, right_info);
1484 info->bytes += right_info->bytes; 1533 info->bytes += right_info->bytes;
1485 kmem_cache_free(btrfs_free_space_cachep, right_info); 1534 kmem_cache_free(btrfs_free_space_cachep, right_info);
1486 merged = true; 1535 merged = true;
@@ -1489,9 +1538,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1489 if (left_info && !left_info->bitmap && 1538 if (left_info && !left_info->bitmap &&
1490 left_info->offset + left_info->bytes == offset) { 1539 left_info->offset + left_info->bytes == offset) {
1491 if (update_stat) 1540 if (update_stat)
1492 unlink_free_space(block_group, left_info); 1541 unlink_free_space(ctl, left_info);
1493 else 1542 else
1494 __unlink_free_space(block_group, left_info); 1543 __unlink_free_space(ctl, left_info);
1495 info->offset = left_info->offset; 1544 info->offset = left_info->offset;
1496 info->bytes += left_info->bytes; 1545 info->bytes += left_info->bytes;
1497 kmem_cache_free(btrfs_free_space_cachep, left_info); 1546 kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -1501,8 +1550,8 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
1501 return merged; 1550 return merged;
1502} 1551}
1503 1552
1504int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 1553int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1505 u64 offset, u64 bytes) 1554 u64 offset, u64 bytes)
1506{ 1555{
1507 struct btrfs_free_space *info; 1556 struct btrfs_free_space *info;
1508 int ret = 0; 1557 int ret = 0;
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1514 info->offset = offset; 1563 info->offset = offset;
1515 info->bytes = bytes; 1564 info->bytes = bytes;
1516 1565
1517 spin_lock(&block_group->tree_lock); 1566 spin_lock(&ctl->tree_lock);
1518 1567
1519 if (try_merge_free_space(block_group, info, true)) 1568 if (try_merge_free_space(ctl, info, true))
1520 goto link; 1569 goto link;
1521 1570
1522 /* 1571 /*
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1524 * extent then we know we're going to have to allocate a new extent, so 1573 * extent then we know we're going to have to allocate a new extent, so
1525 * before we do that see if we need to drop this into a bitmap 1574 * before we do that see if we need to drop this into a bitmap
1526 */ 1575 */
1527 ret = insert_into_bitmap(block_group, info); 1576 ret = insert_into_bitmap(ctl, info);
1528 if (ret < 0) { 1577 if (ret < 0) {
1529 goto out; 1578 goto out;
1530 } else if (ret) { 1579 } else if (ret) {
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
1532 goto out; 1581 goto out;
1533 } 1582 }
1534link: 1583link:
1535 ret = link_free_space(block_group, info); 1584 ret = link_free_space(ctl, info);
1536 if (ret) 1585 if (ret)
1537 kmem_cache_free(btrfs_free_space_cachep, info); 1586 kmem_cache_free(btrfs_free_space_cachep, info);
1538out: 1587out:
1539 spin_unlock(&block_group->tree_lock); 1588 spin_unlock(&ctl->tree_lock);
1540 1589
1541 if (ret) { 1590 if (ret) {
1542 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); 1591 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
@@ -1549,21 +1598,21 @@ out:
1549int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 1598int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1550 u64 offset, u64 bytes) 1599 u64 offset, u64 bytes)
1551{ 1600{
1601 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1552 struct btrfs_free_space *info; 1602 struct btrfs_free_space *info;
1553 struct btrfs_free_space *next_info = NULL; 1603 struct btrfs_free_space *next_info = NULL;
1554 int ret = 0; 1604 int ret = 0;
1555 1605
1556 spin_lock(&block_group->tree_lock); 1606 spin_lock(&ctl->tree_lock);
1557 1607
1558again: 1608again:
1559 info = tree_search_offset(block_group, offset, 0, 0); 1609 info = tree_search_offset(ctl, offset, 0, 0);
1560 if (!info) { 1610 if (!info) {
1561 /* 1611 /*
1562 * oops didn't find an extent that matched the space we wanted 1612 * oops didn't find an extent that matched the space we wanted
1563 * to remove, look for a bitmap instead 1613 * to remove, look for a bitmap instead
1564 */ 1614 */
1565 info = tree_search_offset(block_group, 1615 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1566 offset_to_bitmap(block_group, offset),
1567 1, 0); 1616 1, 0);
1568 if (!info) { 1617 if (!info) {
1569 WARN_ON(1); 1618 WARN_ON(1);
@@ -1578,8 +1627,8 @@ again:
1578 offset_index); 1627 offset_index);
1579 1628
1580 if (next_info->bitmap) 1629 if (next_info->bitmap)
1581 end = next_info->offset + BITS_PER_BITMAP * 1630 end = next_info->offset +
1582 block_group->sectorsize - 1; 1631 BITS_PER_BITMAP * ctl->unit - 1;
1583 else 1632 else
1584 end = next_info->offset + next_info->bytes; 1633 end = next_info->offset + next_info->bytes;
1585 1634
@@ -1599,20 +1648,20 @@ again:
1599 } 1648 }
1600 1649
1601 if (info->bytes == bytes) { 1650 if (info->bytes == bytes) {
1602 unlink_free_space(block_group, info); 1651 unlink_free_space(ctl, info);
1603 if (info->bitmap) { 1652 if (info->bitmap) {
1604 kfree(info->bitmap); 1653 kfree(info->bitmap);
1605 block_group->total_bitmaps--; 1654 ctl->total_bitmaps--;
1606 } 1655 }
1607 kmem_cache_free(btrfs_free_space_cachep, info); 1656 kmem_cache_free(btrfs_free_space_cachep, info);
1608 goto out_lock; 1657 goto out_lock;
1609 } 1658 }
1610 1659
1611 if (!info->bitmap && info->offset == offset) { 1660 if (!info->bitmap && info->offset == offset) {
1612 unlink_free_space(block_group, info); 1661 unlink_free_space(ctl, info);
1613 info->offset += bytes; 1662 info->offset += bytes;
1614 info->bytes -= bytes; 1663 info->bytes -= bytes;
1615 link_free_space(block_group, info); 1664 link_free_space(ctl, info);
1616 goto out_lock; 1665 goto out_lock;
1617 } 1666 }
1618 1667
@@ -1626,13 +1675,13 @@ again:
1626 * first unlink the old info and then 1675 * first unlink the old info and then
1627 * insert it again after the hole we're creating 1676 * insert it again after the hole we're creating
1628 */ 1677 */
1629 unlink_free_space(block_group, info); 1678 unlink_free_space(ctl, info);
1630 if (offset + bytes < info->offset + info->bytes) { 1679 if (offset + bytes < info->offset + info->bytes) {
1631 u64 old_end = info->offset + info->bytes; 1680 u64 old_end = info->offset + info->bytes;
1632 1681
1633 info->offset = offset + bytes; 1682 info->offset = offset + bytes;
1634 info->bytes = old_end - info->offset; 1683 info->bytes = old_end - info->offset;
1635 ret = link_free_space(block_group, info); 1684 ret = link_free_space(ctl, info);
1636 WARN_ON(ret); 1685 WARN_ON(ret);
1637 if (ret) 1686 if (ret)
1638 goto out_lock; 1687 goto out_lock;
@@ -1642,7 +1691,7 @@ again:
1642 */ 1691 */
1643 kmem_cache_free(btrfs_free_space_cachep, info); 1692 kmem_cache_free(btrfs_free_space_cachep, info);
1644 } 1693 }
1645 spin_unlock(&block_group->tree_lock); 1694 spin_unlock(&ctl->tree_lock);
1646 1695
1647 /* step two, insert a new info struct to cover 1696 /* step two, insert a new info struct to cover
1648 * anything before the hole 1697 * anything before the hole
@@ -1653,12 +1702,12 @@ again:
1653 goto out; 1702 goto out;
1654 } 1703 }
1655 1704
1656 ret = remove_from_bitmap(block_group, info, &offset, &bytes); 1705 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1657 if (ret == -EAGAIN) 1706 if (ret == -EAGAIN)
1658 goto again; 1707 goto again;
1659 BUG_ON(ret); 1708 BUG_ON(ret);
1660out_lock: 1709out_lock:
1661 spin_unlock(&block_group->tree_lock); 1710 spin_unlock(&ctl->tree_lock);
1662out: 1711out:
1663 return ret; 1712 return ret;
1664} 1713}
@@ -1666,11 +1715,12 @@ out:
1666void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, 1715void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1667 u64 bytes) 1716 u64 bytes)
1668{ 1717{
1718 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1669 struct btrfs_free_space *info; 1719 struct btrfs_free_space *info;
1670 struct rb_node *n; 1720 struct rb_node *n;
1671 int count = 0; 1721 int count = 0;
1672 1722
1673 for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { 1723 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1674 info = rb_entry(n, struct btrfs_free_space, offset_index); 1724 info = rb_entry(n, struct btrfs_free_space, offset_index);
1675 if (info->bytes >= bytes) 1725 if (info->bytes >= bytes)
1676 count++; 1726 count++;
@@ -1685,19 +1735,28 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1685 "\n", count); 1735 "\n", count);
1686} 1736}
1687 1737
1688u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) 1738static struct btrfs_free_space_op free_space_op = {
1739 .recalc_thresholds = recalculate_thresholds,
1740 .use_bitmap = use_bitmap,
1741};
1742
1743void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1689{ 1744{
1690 struct btrfs_free_space *info; 1745 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1691 struct rb_node *n;
1692 u64 ret = 0;
1693 1746
1694 for (n = rb_first(&block_group->free_space_offset); n; 1747 spin_lock_init(&ctl->tree_lock);
1695 n = rb_next(n)) { 1748 ctl->unit = block_group->sectorsize;
1696 info = rb_entry(n, struct btrfs_free_space, offset_index); 1749 ctl->start = block_group->key.objectid;
1697 ret += info->bytes; 1750 ctl->private = block_group;
1698 } 1751 ctl->op = &free_space_op;
1699 1752
1700 return ret; 1753 /*
1754 * we only want to have 32k of ram per block group for keeping
1755 * track of free space, and if we pass 1/2 of that we want to
1756 * start converting things over to using bitmaps
1757 */
1758 ctl->extents_thresh = ((1024 * 32) / 2) /
1759 sizeof(struct btrfs_free_space);
1701} 1760}
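
The default threshold set here follows directly from the 32 KiB budget described in the comment: half of 32 KiB is 16 KiB, and dividing that by sizeof(struct btrfs_free_space), roughly 64 bytes on a 64-bit build (an estimate, the exact size depends on the architecture), allows on the order of 256 plain extent entries per block group before the cache starts preferring bitmaps.
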
1702 1761
1703/* 1762/*
@@ -1711,6 +1770,7 @@ __btrfs_return_cluster_to_free_space(
1711 struct btrfs_block_group_cache *block_group, 1770 struct btrfs_block_group_cache *block_group,
1712 struct btrfs_free_cluster *cluster) 1771 struct btrfs_free_cluster *cluster)
1713{ 1772{
1773 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1714 struct btrfs_free_space *entry; 1774 struct btrfs_free_space *entry;
1715 struct rb_node *node; 1775 struct rb_node *node;
1716 1776
@@ -1732,8 +1792,8 @@ __btrfs_return_cluster_to_free_space(
1732 1792
1733 bitmap = (entry->bitmap != NULL); 1793 bitmap = (entry->bitmap != NULL);
1734 if (!bitmap) 1794 if (!bitmap)
1735 try_merge_free_space(block_group, entry, false); 1795 try_merge_free_space(ctl, entry, false);
1736 tree_insert_offset(&block_group->free_space_offset, 1796 tree_insert_offset(&ctl->free_space_offset,
1737 entry->offset, &entry->offset_index, bitmap); 1797 entry->offset, &entry->offset_index, bitmap);
1738 } 1798 }
1739 cluster->root = RB_ROOT; 1799 cluster->root = RB_ROOT;
@@ -1744,14 +1804,38 @@ out:
1744 return 0; 1804 return 0;
1745} 1805}
1746 1806
1747void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) 1807void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
1748{ 1808{
1749 struct btrfs_free_space *info; 1809 struct btrfs_free_space *info;
1750 struct rb_node *node; 1810 struct rb_node *node;
1811
1812 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
1813 info = rb_entry(node, struct btrfs_free_space, offset_index);
1814 unlink_free_space(ctl, info);
1815 kfree(info->bitmap);
1816 kmem_cache_free(btrfs_free_space_cachep, info);
1817 if (need_resched()) {
1818 spin_unlock(&ctl->tree_lock);
1819 cond_resched();
1820 spin_lock(&ctl->tree_lock);
1821 }
1822 }
1823}
1824
1825void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
1826{
1827 spin_lock(&ctl->tree_lock);
1828 __btrfs_remove_free_space_cache_locked(ctl);
1829 spin_unlock(&ctl->tree_lock);
1830}
1831
1832void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1833{
1834 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1751 struct btrfs_free_cluster *cluster; 1835 struct btrfs_free_cluster *cluster;
1752 struct list_head *head; 1836 struct list_head *head;
1753 1837
1754 spin_lock(&block_group->tree_lock); 1838 spin_lock(&ctl->tree_lock);
1755 while ((head = block_group->cluster_list.next) != 1839 while ((head = block_group->cluster_list.next) !=
1756 &block_group->cluster_list) { 1840 &block_group->cluster_list) {
1757 cluster = list_entry(head, struct btrfs_free_cluster, 1841 cluster = list_entry(head, struct btrfs_free_cluster,
@@ -1760,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1760 WARN_ON(cluster->block_group != block_group); 1844 WARN_ON(cluster->block_group != block_group);
1761 __btrfs_return_cluster_to_free_space(block_group, cluster); 1845 __btrfs_return_cluster_to_free_space(block_group, cluster);
1762 if (need_resched()) { 1846 if (need_resched()) {
1763 spin_unlock(&block_group->tree_lock); 1847 spin_unlock(&ctl->tree_lock);
1764 cond_resched(); 1848 cond_resched();
1765 spin_lock(&block_group->tree_lock); 1849 spin_lock(&ctl->tree_lock);
1766 } 1850 }
1767 } 1851 }
1852 __btrfs_remove_free_space_cache_locked(ctl);
1853 spin_unlock(&ctl->tree_lock);
1768 1854
1769 while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
1770 info = rb_entry(node, struct btrfs_free_space, offset_index);
1771 if (!info->bitmap) {
1772 unlink_free_space(block_group, info);
1773 kmem_cache_free(btrfs_free_space_cachep, info);
1774 } else {
1775 free_bitmap(block_group, info);
1776 }
1777
1778 if (need_resched()) {
1779 spin_unlock(&block_group->tree_lock);
1780 cond_resched();
1781 spin_lock(&block_group->tree_lock);
1782 }
1783 }
1784
1785 spin_unlock(&block_group->tree_lock);
1786} 1855}
1787 1856
1788u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 1857u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
1789 u64 offset, u64 bytes, u64 empty_size) 1858 u64 offset, u64 bytes, u64 empty_size)
1790{ 1859{
1860 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1791 struct btrfs_free_space *entry = NULL; 1861 struct btrfs_free_space *entry = NULL;
1792 u64 bytes_search = bytes + empty_size; 1862 u64 bytes_search = bytes + empty_size;
1793 u64 ret = 0; 1863 u64 ret = 0;
1794 1864
1795 spin_lock(&block_group->tree_lock); 1865 spin_lock(&ctl->tree_lock);
1796 entry = find_free_space(block_group, &offset, &bytes_search, 0); 1866 entry = find_free_space(ctl, &offset, &bytes_search);
1797 if (!entry) 1867 if (!entry)
1798 goto out; 1868 goto out;
1799 1869
1800 ret = offset; 1870 ret = offset;
1801 if (entry->bitmap) { 1871 if (entry->bitmap) {
1802 bitmap_clear_bits(block_group, entry, offset, bytes); 1872 bitmap_clear_bits(ctl, entry, offset, bytes);
1803 if (!entry->bytes) 1873 if (!entry->bytes)
1804 free_bitmap(block_group, entry); 1874 free_bitmap(ctl, entry);
1805 } else { 1875 } else {
1806 unlink_free_space(block_group, entry); 1876 unlink_free_space(ctl, entry);
1807 entry->offset += bytes; 1877 entry->offset += bytes;
1808 entry->bytes -= bytes; 1878 entry->bytes -= bytes;
1809 if (!entry->bytes) 1879 if (!entry->bytes)
1810 kmem_cache_free(btrfs_free_space_cachep, entry); 1880 kmem_cache_free(btrfs_free_space_cachep, entry);
1811 else 1881 else
1812 link_free_space(block_group, entry); 1882 link_free_space(ctl, entry);
1813 } 1883 }
1814 1884
1815out: 1885out:
1816 spin_unlock(&block_group->tree_lock); 1886 spin_unlock(&ctl->tree_lock);
1817 1887
1818 return ret; 1888 return ret;
1819} 1889}
@@ -1830,6 +1900,7 @@ int btrfs_return_cluster_to_free_space(
1830 struct btrfs_block_group_cache *block_group, 1900 struct btrfs_block_group_cache *block_group,
1831 struct btrfs_free_cluster *cluster) 1901 struct btrfs_free_cluster *cluster)
1832{ 1902{
1903 struct btrfs_free_space_ctl *ctl;
1833 int ret; 1904 int ret;
1834 1905
1835 /* first, get a safe pointer to the block group */ 1906 /* first, get a safe pointer to the block group */
@@ -1848,10 +1919,12 @@ int btrfs_return_cluster_to_free_space(
1848 atomic_inc(&block_group->count); 1919 atomic_inc(&block_group->count);
1849 spin_unlock(&cluster->lock); 1920 spin_unlock(&cluster->lock);
1850 1921
1922 ctl = block_group->free_space_ctl;
1923
1851 /* now return any extents the cluster had on it */ 1924 /* now return any extents the cluster had on it */
1852 spin_lock(&block_group->tree_lock); 1925 spin_lock(&ctl->tree_lock);
1853 ret = __btrfs_return_cluster_to_free_space(block_group, cluster); 1926 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
1854 spin_unlock(&block_group->tree_lock); 1927 spin_unlock(&ctl->tree_lock);
1855 1928
1856 /* finally drop our ref */ 1929 /* finally drop our ref */
1857 btrfs_put_block_group(block_group); 1930 btrfs_put_block_group(block_group);
@@ -1863,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1863 struct btrfs_free_space *entry, 1936 struct btrfs_free_space *entry,
1864 u64 bytes, u64 min_start) 1937 u64 bytes, u64 min_start)
1865{ 1938{
1939 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1866 int err; 1940 int err;
1867 u64 search_start = cluster->window_start; 1941 u64 search_start = cluster->window_start;
1868 u64 search_bytes = bytes; 1942 u64 search_bytes = bytes;
@@ -1871,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
1871 search_start = min_start; 1945 search_start = min_start;
1872 search_bytes = bytes; 1946 search_bytes = bytes;
1873 1947
1874 err = search_bitmap(block_group, entry, &search_start, 1948 err = search_bitmap(ctl, entry, &search_start, &search_bytes);
1875 &search_bytes);
1876 if (err) 1949 if (err)
1877 return 0; 1950 return 0;
1878 1951
1879 ret = search_start; 1952 ret = search_start;
1880 bitmap_clear_bits(block_group, entry, ret, bytes); 1953 bitmap_clear_bits(ctl, entry, ret, bytes);
1881 1954
1882 return ret; 1955 return ret;
1883} 1956}
@@ -1891,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
1891 struct btrfs_free_cluster *cluster, u64 bytes, 1964 struct btrfs_free_cluster *cluster, u64 bytes,
1892 u64 min_start) 1965 u64 min_start)
1893{ 1966{
1967 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1894 struct btrfs_free_space *entry = NULL; 1968 struct btrfs_free_space *entry = NULL;
1895 struct rb_node *node; 1969 struct rb_node *node;
1896 u64 ret = 0; 1970 u64 ret = 0;
@@ -1951,20 +2025,20 @@ out:
1951 if (!ret) 2025 if (!ret)
1952 return 0; 2026 return 0;
1953 2027
1954 spin_lock(&block_group->tree_lock); 2028 spin_lock(&ctl->tree_lock);
1955 2029
1956 block_group->free_space -= bytes; 2030 ctl->free_space -= bytes;
1957 if (entry->bytes == 0) { 2031 if (entry->bytes == 0) {
1958 block_group->free_extents--; 2032 ctl->free_extents--;
1959 if (entry->bitmap) { 2033 if (entry->bitmap) {
1960 kfree(entry->bitmap); 2034 kfree(entry->bitmap);
1961 block_group->total_bitmaps--; 2035 ctl->total_bitmaps--;
1962 recalculate_thresholds(block_group); 2036 ctl->op->recalc_thresholds(ctl);
1963 } 2037 }
1964 kmem_cache_free(btrfs_free_space_cachep, entry); 2038 kmem_cache_free(btrfs_free_space_cachep, entry);
1965 } 2039 }
1966 2040
1967 spin_unlock(&block_group->tree_lock); 2041 spin_unlock(&ctl->tree_lock);
1968 2042
1969 return ret; 2043 return ret;
1970} 2044}
@@ -1974,6 +2048,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
1974 struct btrfs_free_cluster *cluster, 2048 struct btrfs_free_cluster *cluster,
1975 u64 offset, u64 bytes, u64 min_bytes) 2049 u64 offset, u64 bytes, u64 min_bytes)
1976{ 2050{
2051 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1977 unsigned long next_zero; 2052 unsigned long next_zero;
1978 unsigned long i; 2053 unsigned long i;
1979 unsigned long search_bits; 2054 unsigned long search_bits;
@@ -2028,7 +2103,7 @@ again:
2028 2103
2029 cluster->window_start = start * block_group->sectorsize + 2104 cluster->window_start = start * block_group->sectorsize +
2030 entry->offset; 2105 entry->offset;
2031 rb_erase(&entry->offset_index, &block_group->free_space_offset); 2106 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2032 ret = tree_insert_offset(&cluster->root, entry->offset, 2107 ret = tree_insert_offset(&cluster->root, entry->offset,
2033 &entry->offset_index, 1); 2108 &entry->offset_index, 1);
2034 BUG_ON(ret); 2109 BUG_ON(ret);
@@ -2043,6 +2118,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2043 struct btrfs_free_cluster *cluster, 2118 struct btrfs_free_cluster *cluster,
2044 u64 offset, u64 bytes, u64 min_bytes) 2119 u64 offset, u64 bytes, u64 min_bytes)
2045{ 2120{
2121 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2046 struct btrfs_free_space *first = NULL; 2122 struct btrfs_free_space *first = NULL;
2047 struct btrfs_free_space *entry = NULL; 2123 struct btrfs_free_space *entry = NULL;
2048 struct btrfs_free_space *prev = NULL; 2124 struct btrfs_free_space *prev = NULL;
@@ -2053,7 +2129,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2053 u64 max_extent; 2129 u64 max_extent;
2054 u64 max_gap = 128 * 1024; 2130 u64 max_gap = 128 * 1024;
2055 2131
2056 entry = tree_search_offset(block_group, offset, 0, 1); 2132 entry = tree_search_offset(ctl, offset, 0, 1);
2057 if (!entry) 2133 if (!entry)
2058 return -ENOSPC; 2134 return -ENOSPC;
2059 2135
@@ -2119,7 +2195,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2119 if (entry->bitmap) 2195 if (entry->bitmap)
2120 continue; 2196 continue;
2121 2197
2122 rb_erase(&entry->offset_index, &block_group->free_space_offset); 2198 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2123 ret = tree_insert_offset(&cluster->root, entry->offset, 2199 ret = tree_insert_offset(&cluster->root, entry->offset,
2124 &entry->offset_index, 0); 2200 &entry->offset_index, 0);
2125 BUG_ON(ret); 2201 BUG_ON(ret);
@@ -2138,16 +2214,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2138 struct btrfs_free_cluster *cluster, 2214 struct btrfs_free_cluster *cluster,
2139 u64 offset, u64 bytes, u64 min_bytes) 2215 u64 offset, u64 bytes, u64 min_bytes)
2140{ 2216{
2217 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2141 struct btrfs_free_space *entry; 2218 struct btrfs_free_space *entry;
2142 struct rb_node *node; 2219 struct rb_node *node;
2143 int ret = -ENOSPC; 2220 int ret = -ENOSPC;
2144 2221
2145 if (block_group->total_bitmaps == 0) 2222 if (ctl->total_bitmaps == 0)
2146 return -ENOSPC; 2223 return -ENOSPC;
2147 2224
2148 entry = tree_search_offset(block_group, 2225 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2149 offset_to_bitmap(block_group, offset),
2150 0, 1);
2151 if (!entry) 2226 if (!entry)
2152 return -ENOSPC; 2227 return -ENOSPC;
2153 2228
@@ -2180,6 +2255,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2180 struct btrfs_free_cluster *cluster, 2255 struct btrfs_free_cluster *cluster,
2181 u64 offset, u64 bytes, u64 empty_size) 2256 u64 offset, u64 bytes, u64 empty_size)
2182{ 2257{
2258 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2183 u64 min_bytes; 2259 u64 min_bytes;
2184 int ret; 2260 int ret;
2185 2261
@@ -2199,14 +2275,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2199 } else 2275 } else
2200 min_bytes = max(bytes, (bytes + empty_size) >> 2); 2276 min_bytes = max(bytes, (bytes + empty_size) >> 2);
2201 2277
2202 spin_lock(&block_group->tree_lock); 2278 spin_lock(&ctl->tree_lock);
2203 2279
2204 /* 2280 /*
2205 * If we know we don't have enough space to make a cluster don't even 2281 * If we know we don't have enough space to make a cluster don't even
2206 * bother doing all the work to try and find one. 2282 * bother doing all the work to try and find one.
2207 */ 2283 */
2208 if (block_group->free_space < min_bytes) { 2284 if (ctl->free_space < min_bytes) {
2209 spin_unlock(&block_group->tree_lock); 2285 spin_unlock(&ctl->tree_lock);
2210 return -ENOSPC; 2286 return -ENOSPC;
2211 } 2287 }
2212 2288
@@ -2232,7 +2308,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2232 } 2308 }
2233out: 2309out:
2234 spin_unlock(&cluster->lock); 2310 spin_unlock(&cluster->lock);
2235 spin_unlock(&block_group->tree_lock); 2311 spin_unlock(&ctl->tree_lock);
2236 2312
2237 return ret; 2313 return ret;
2238} 2314}
@@ -2253,6 +2329,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2253int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, 2329int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2254 u64 *trimmed, u64 start, u64 end, u64 minlen) 2330 u64 *trimmed, u64 start, u64 end, u64 minlen)
2255{ 2331{
2332 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2256 struct btrfs_free_space *entry = NULL; 2333 struct btrfs_free_space *entry = NULL;
2257 struct btrfs_fs_info *fs_info = block_group->fs_info; 2334 struct btrfs_fs_info *fs_info = block_group->fs_info;
2258 u64 bytes = 0; 2335 u64 bytes = 0;
@@ -2262,52 +2339,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2262 *trimmed = 0; 2339 *trimmed = 0;
2263 2340
2264 while (start < end) { 2341 while (start < end) {
2265 spin_lock(&block_group->tree_lock); 2342 spin_lock(&ctl->tree_lock);
2266 2343
2267 if (block_group->free_space < minlen) { 2344 if (ctl->free_space < minlen) {
2268 spin_unlock(&block_group->tree_lock); 2345 spin_unlock(&ctl->tree_lock);
2269 break; 2346 break;
2270 } 2347 }
2271 2348
2272 entry = tree_search_offset(block_group, start, 0, 1); 2349 entry = tree_search_offset(ctl, start, 0, 1);
2273 if (!entry) 2350 if (!entry)
2274 entry = tree_search_offset(block_group, 2351 entry = tree_search_offset(ctl,
2275 offset_to_bitmap(block_group, 2352 offset_to_bitmap(ctl, start),
2276 start),
2277 1, 1); 2353 1, 1);
2278 2354
2279 if (!entry || entry->offset >= end) { 2355 if (!entry || entry->offset >= end) {
2280 spin_unlock(&block_group->tree_lock); 2356 spin_unlock(&ctl->tree_lock);
2281 break; 2357 break;
2282 } 2358 }
2283 2359
2284 if (entry->bitmap) { 2360 if (entry->bitmap) {
2285 ret = search_bitmap(block_group, entry, &start, &bytes); 2361 ret = search_bitmap(ctl, entry, &start, &bytes);
2286 if (!ret) { 2362 if (!ret) {
2287 if (start >= end) { 2363 if (start >= end) {
2288 spin_unlock(&block_group->tree_lock); 2364 spin_unlock(&ctl->tree_lock);
2289 break; 2365 break;
2290 } 2366 }
2291 bytes = min(bytes, end - start); 2367 bytes = min(bytes, end - start);
2292 bitmap_clear_bits(block_group, entry, 2368 bitmap_clear_bits(ctl, entry, start, bytes);
2293 start, bytes);
2294 if (entry->bytes == 0) 2369 if (entry->bytes == 0)
2295 free_bitmap(block_group, entry); 2370 free_bitmap(ctl, entry);
2296 } else { 2371 } else {
2297 start = entry->offset + BITS_PER_BITMAP * 2372 start = entry->offset + BITS_PER_BITMAP *
2298 block_group->sectorsize; 2373 block_group->sectorsize;
2299 spin_unlock(&block_group->tree_lock); 2374 spin_unlock(&ctl->tree_lock);
2300 ret = 0; 2375 ret = 0;
2301 continue; 2376 continue;
2302 } 2377 }
2303 } else { 2378 } else {
2304 start = entry->offset; 2379 start = entry->offset;
2305 bytes = min(entry->bytes, end - start); 2380 bytes = min(entry->bytes, end - start);
2306 unlink_free_space(block_group, entry); 2381 unlink_free_space(ctl, entry);
2307 kmem_cache_free(btrfs_free_space_cachep, entry); 2382 kmem_cache_free(btrfs_free_space_cachep, entry);
2308 } 2383 }
2309 2384
2310 spin_unlock(&block_group->tree_lock); 2385 spin_unlock(&ctl->tree_lock);
2311 2386
2312 if (bytes >= minlen) { 2387 if (bytes >= minlen) {
2313 int update_ret; 2388 int update_ret;
@@ -2319,8 +2394,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2319 bytes, 2394 bytes,
2320 &actually_trimmed); 2395 &actually_trimmed);
2321 2396
2322 btrfs_add_free_space(block_group, 2397 btrfs_add_free_space(block_group, start, bytes);
2323 start, bytes);
2324 if (!update_ret) 2398 if (!update_ret)
2325 btrfs_update_reserved_bytes(block_group, 2399 btrfs_update_reserved_bytes(block_group,
2326 bytes, 0, 1); 2400 bytes, 0, 1);
@@ -2342,3 +2416,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2342 2416
2343 return ret; 2417 return ret;
2344} 2418}
2419
2420/*
2421 * Find the left-most item in the cache tree, and then return the
2422 * smallest inode number in the item.
2423 *
2424 * Note: the returned inode number may not be the smallest one in
2425 * the tree, if the left-most item is a bitmap.
2426 */
2427u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2428{
2429 struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2430 struct btrfs_free_space *entry = NULL;
2431 u64 ino = 0;
2432
2433 spin_lock(&ctl->tree_lock);
2434
2435 if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2436 goto out;
2437
2438 entry = rb_entry(rb_first(&ctl->free_space_offset),
2439 struct btrfs_free_space, offset_index);
2440
2441 if (!entry->bitmap) {
2442 ino = entry->offset;
2443
2444 unlink_free_space(ctl, entry);
2445 entry->offset++;
2446 entry->bytes--;
2447 if (!entry->bytes)
2448 kmem_cache_free(btrfs_free_space_cachep, entry);
2449 else
2450 link_free_space(ctl, entry);
2451 } else {
2452 u64 offset = 0;
2453 u64 count = 1;
2454 int ret;
2455
2456 ret = search_bitmap(ctl, entry, &offset, &count);
2457 BUG_ON(ret);
2458
2459 ino = offset;
2460 bitmap_clear_bits(ctl, entry, offset, 1);
2461 if (entry->bytes == 0)
2462 free_bitmap(ctl, entry);
2463 }
2464out:
2465 spin_unlock(&ctl->tree_lock);
2466
2467 return ino;
2468}
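
To illustrate the note above (numbers hypothetical): with unit == 1, a bitmap entry always sits at a window-aligned offset, so a bitmap covering [0, 32768) whose lowest set bit is 500 sorts before an extent entry at offset 100. rb_first() lands on the bitmap and 500 is handed out even though 100 is also free; the smaller number stays in the tree and is returned by a later call once the bitmap ahead of it drains.
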
2469
2470struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2471 struct btrfs_path *path)
2472{
2473 struct inode *inode = NULL;
2474
2475 spin_lock(&root->cache_lock);
2476 if (root->cache_inode)
2477 inode = igrab(root->cache_inode);
2478 spin_unlock(&root->cache_lock);
2479 if (inode)
2480 return inode;
2481
2482 inode = __lookup_free_space_inode(root, path, 0);
2483 if (IS_ERR(inode))
2484 return inode;
2485
2486 spin_lock(&root->cache_lock);
2487 if (!root->fs_info->closing)
2488 root->cache_inode = igrab(inode);
2489 spin_unlock(&root->cache_lock);
2490
2491 return inode;
2492}
2493
2494int create_free_ino_inode(struct btrfs_root *root,
2495 struct btrfs_trans_handle *trans,
2496 struct btrfs_path *path)
2497{
2498 return __create_free_space_inode(root, trans, path,
2499 BTRFS_FREE_INO_OBJECTID, 0);
2500}
2501
2502int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2503{
2504 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2505 struct btrfs_path *path;
2506 struct inode *inode;
2507 int ret = 0;
2508 u64 root_gen = btrfs_root_generation(&root->root_item);
2509
2510 /*
2511 * If we're unmounting then just return, since this does a search on the
2512 * normal root and not the commit root and we could deadlock.
2513 */
2514 smp_mb();
2515 if (fs_info->closing)
2516 return 0;
2517
2518 path = btrfs_alloc_path();
2519 if (!path)
2520 return 0;
2521
2522 inode = lookup_free_ino_inode(root, path);
2523 if (IS_ERR(inode))
2524 goto out;
2525
2526 if (root_gen != BTRFS_I(inode)->generation)
2527 goto out_put;
2528
2529 ret = __load_free_space_cache(root, inode, ctl, path, 0);
2530
2531 if (ret < 0)
2532 printk(KERN_ERR "btrfs: failed to load free ino cache for "
2533 "root %llu\n", root->root_key.objectid);
2534out_put:
2535 iput(inode);
2536out:
2537 btrfs_free_path(path);
2538 return ret;
2539}
2540
2541int btrfs_write_out_ino_cache(struct btrfs_root *root,
2542 struct btrfs_trans_handle *trans,
2543 struct btrfs_path *path)
2544{
2545 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2546 struct inode *inode;
2547 int ret;
2548
2549 inode = lookup_free_ino_inode(root, path);
2550 if (IS_ERR(inode))
2551 return 0;
2552
2553 ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2554 if (ret < 0)
2555 printk(KERN_ERR "btrfs: failed to write free ino cache "
2556 "for root %llu\n", root->root_key.objectid);
2557
2558 iput(inode);
2559 return ret;
2560}
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 65c3b935289..8f2613f779e 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -27,6 +27,25 @@ struct btrfs_free_space {
27 struct list_head list; 27 struct list_head list;
28}; 28};
29 29
30struct btrfs_free_space_ctl {
31 spinlock_t tree_lock;
32 struct rb_root free_space_offset;
33 u64 free_space;
34 int extents_thresh;
35 int free_extents;
36 int total_bitmaps;
37 int unit;
38 u64 start;
39 struct btrfs_free_space_op *op;
40 void *private;
41};
42
43struct btrfs_free_space_op {
44 void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
45 bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
46 struct btrfs_free_space *info);
47};
48
30struct inode *lookup_free_space_inode(struct btrfs_root *root, 49struct inode *lookup_free_space_inode(struct btrfs_root *root,
31 struct btrfs_block_group_cache 50 struct btrfs_block_group_cache
32 *block_group, struct btrfs_path *path); 51 *block_group, struct btrfs_path *path);
@@ -45,17 +64,38 @@ int btrfs_write_out_cache(struct btrfs_root *root,
45 struct btrfs_trans_handle *trans, 64 struct btrfs_trans_handle *trans,
46 struct btrfs_block_group_cache *block_group, 65 struct btrfs_block_group_cache *block_group,
47 struct btrfs_path *path); 66 struct btrfs_path *path);
48int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, 67
49 u64 bytenr, u64 size); 68struct inode *lookup_free_ino_inode(struct btrfs_root *root,
69 struct btrfs_path *path);
70int create_free_ino_inode(struct btrfs_root *root,
71 struct btrfs_trans_handle *trans,
72 struct btrfs_path *path);
73int load_free_ino_cache(struct btrfs_fs_info *fs_info,
74 struct btrfs_root *root);
75int btrfs_write_out_ino_cache(struct btrfs_root *root,
76 struct btrfs_trans_handle *trans,
77 struct btrfs_path *path);
78
79void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
80int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
81 u64 bytenr, u64 size);
82static inline int
83btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
84 u64 bytenr, u64 size)
85{
86 return __btrfs_add_free_space(block_group->free_space_ctl,
87 bytenr, size);
88}
50int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, 89int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
51 u64 bytenr, u64 size); 90 u64 bytenr, u64 size);
91void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
52void btrfs_remove_free_space_cache(struct btrfs_block_group_cache 92void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
53 *block_group); 93 *block_group);
54u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, 94u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
55 u64 offset, u64 bytes, u64 empty_size); 95 u64 offset, u64 bytes, u64 empty_size);
96u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
56void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, 97void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
57 u64 bytes); 98 u64 bytes);
58u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
59int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, 99int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
60 struct btrfs_root *root, 100 struct btrfs_root *root,
61 struct btrfs_block_group_cache *block_group, 101 struct btrfs_block_group_cache *block_group,
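
The btrfs_free_space_ctl / btrfs_free_space_op pair added to this header is what lets the free-inode-number cache reuse the same rb-tree machinery with a different policy. Purely as an illustration of the wiring (the names below are hypothetical; the real inode-cache op table is defined elsewhere in this series), a second consumer could be set up like this:

static void sketch_ino_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
	/* inode numbers are cheap to bitmap, so never insist on extent entries */
	ctl->extents_thresh = 0;
}

static bool sketch_ino_use_bitmap(struct btrfs_free_space_ctl *ctl,
				  struct btrfs_free_space *info)
{
	return true;
}

static struct btrfs_free_space_op sketch_free_ino_op = {
	.recalc_thresholds	= sketch_ino_recalc_thresholds,
	.use_bitmap		= sketch_ino_use_bitmap,
};

static void sketch_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;		/* track inode numbers, not bytes */
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &sketch_free_ino_op;
}
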
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c05a08f4c41..7967e85c72f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -16,11 +16,430 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/delay.h>
20#include <linux/kthread.h>
21#include <linux/pagemap.h>
22
19#include "ctree.h" 23#include "ctree.h"
20#include "disk-io.h" 24#include "disk-io.h"
25#include "free-space-cache.h"
26#include "inode-map.h"
21#include "transaction.h" 27#include "transaction.h"
22 28
23int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) 29static int caching_kthread(void *data)
30{
31 struct btrfs_root *root = data;
32 struct btrfs_fs_info *fs_info = root->fs_info;
33 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
34 struct btrfs_key key;
35 struct btrfs_path *path;
36 struct extent_buffer *leaf;
37 u64 last = (u64)-1;
38 int slot;
39 int ret;
40
41 path = btrfs_alloc_path();
42 if (!path)
43 return -ENOMEM;
44
45 /* Since the commit root is read-only, we can safely skip locking. */
46 path->skip_locking = 1;
47 path->search_commit_root = 1;
48 path->reada = 2;
49
50 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
51 key.offset = 0;
52 key.type = BTRFS_INODE_ITEM_KEY;
53again:
54 /* need to make sure the commit_root doesn't disappear */
55 mutex_lock(&root->fs_commit_mutex);
56
57 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
58 if (ret < 0)
59 goto out;
60
61 while (1) {
62 smp_mb();
63 if (fs_info->closing > 1)
64 goto out;
65
66 leaf = path->nodes[0];
67 slot = path->slots[0];
68 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
69 ret = btrfs_next_leaf(root, path);
70 if (ret < 0)
71 goto out;
72 else if (ret > 0)
73 break;
74
75 if (need_resched() ||
76 btrfs_transaction_in_commit(fs_info)) {
77 leaf = path->nodes[0];
78
79 if (btrfs_header_nritems(leaf) == 0) {
80 WARN_ON(1);
81 break;
82 }
83
84 /*
 85				 * Save the key so we can advance forward
86 * in the next search.
87 */
88 btrfs_item_key_to_cpu(leaf, &key, 0);
89 btrfs_release_path(root, path);
90 root->cache_progress = last;
91 mutex_unlock(&root->fs_commit_mutex);
92 schedule_timeout(1);
93 goto again;
94 } else
95 continue;
96 }
97
98 btrfs_item_key_to_cpu(leaf, &key, slot);
99
100 if (key.type != BTRFS_INODE_ITEM_KEY)
101 goto next;
102
103 if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
104 break;
105
106 if (last != (u64)-1 && last + 1 != key.objectid) {
107 __btrfs_add_free_space(ctl, last + 1,
108 key.objectid - last - 1);
109 wake_up(&root->cache_wait);
110 }
111
112 last = key.objectid;
113next:
114 path->slots[0]++;
115 }
116
117 if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
118 __btrfs_add_free_space(ctl, last + 1,
119 BTRFS_LAST_FREE_OBJECTID - last - 1);
120 }
121
122 spin_lock(&root->cache_lock);
123 root->cached = BTRFS_CACHE_FINISHED;
124 spin_unlock(&root->cache_lock);
125
126 root->cache_progress = (u64)-1;
127 btrfs_unpin_free_ino(root);
128out:
129 wake_up(&root->cache_wait);
130 mutex_unlock(&root->fs_commit_mutex);
131
132 btrfs_free_path(path);
133
134 return ret;
135}
136
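
caching_kthread() above walks the commit root in objectid order and records the gaps between allocated inode numbers as free chunks, plus one final chunk up to the last usable objectid. A user-space sketch of just that gap arithmetic; the used[] array is made up, and the upper bound mirrors the -256ULL value of BTRFS_LAST_FREE_OBJECTID:

#include <stdio.h>

#define LAST_FREE_OBJECTID	((unsigned long long)-256)

int main(void)
{
	/* inode numbers found in the commit root, in ascending order */
	unsigned long long used[] = { 256, 257, 260, 1000 };
	unsigned long long last = (unsigned long long)-1;
	unsigned long i;

	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++) {
		/* a hole between two consecutive items is a free chunk */
		if (last != (unsigned long long)-1 && last + 1 != used[i])
			printf("free chunk: start %llu, count %llu\n",
			       last + 1, used[i] - last - 1);
		last = used[i];
	}
	/* everything past the highest used objectid is free as well */
	if (last < LAST_FREE_OBJECTID - 1)
		printf("free chunk: start %llu, count %llu\n",
		       last + 1, LAST_FREE_OBJECTID - last - 1);
	return 0;
}
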
137static void start_caching(struct btrfs_root *root)
138{
139 struct task_struct *tsk;
140 int ret;
141
142 spin_lock(&root->cache_lock);
143 if (root->cached != BTRFS_CACHE_NO) {
144 spin_unlock(&root->cache_lock);
145 return;
146 }
147
148 root->cached = BTRFS_CACHE_STARTED;
149 spin_unlock(&root->cache_lock);
150
151 ret = load_free_ino_cache(root->fs_info, root);
152 if (ret == 1) {
153 spin_lock(&root->cache_lock);
154 root->cached = BTRFS_CACHE_FINISHED;
155 spin_unlock(&root->cache_lock);
156 return;
157 }
158
159 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
160 root->root_key.objectid);
161 BUG_ON(IS_ERR(tsk));
162}
163
164int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
165{
166again:
167 *objectid = btrfs_find_ino_for_alloc(root);
168
169 if (*objectid != 0)
170 return 0;
171
172 start_caching(root);
173
174 wait_event(root->cache_wait,
175 root->cached == BTRFS_CACHE_FINISHED ||
176 root->free_ino_ctl->free_space > 0);
177
178 if (root->cached == BTRFS_CACHE_FINISHED &&
179 root->free_ino_ctl->free_space == 0)
180 return -ENOSPC;
181 else
182 goto again;
183}
184
185void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
186{
187 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
188 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
189again:
190 if (root->cached == BTRFS_CACHE_FINISHED) {
191 __btrfs_add_free_space(ctl, objectid, 1);
192 } else {
193 /*
194		 * If we are in the process of caching free ino chunks, then
195		 * to avoid adding the same inode number to the free_ino
196		 * tree twice across transactions, we leave it in the
197		 * pinned tree until a transaction is committed or the
198		 * caching work is done.
199 */
200
201 mutex_lock(&root->fs_commit_mutex);
202 spin_lock(&root->cache_lock);
203 if (root->cached == BTRFS_CACHE_FINISHED) {
204 spin_unlock(&root->cache_lock);
205 mutex_unlock(&root->fs_commit_mutex);
206 goto again;
207 }
208 spin_unlock(&root->cache_lock);
209
210 start_caching(root);
211
212 if (objectid <= root->cache_progress)
213 __btrfs_add_free_space(ctl, objectid, 1);
214 else
215 __btrfs_add_free_space(pinned, objectid, 1);
216
217 mutex_unlock(&root->fs_commit_mutex);
218 }
219}
220
221/*
222 * When a transaction is committed, we'll move those inode numbers which
223 * are smaller than root->cache_progress from the pinned tree to the
224 * free_ino tree, and the others will just be dropped, because the
225 * commit root we were searching has changed.
226 *
227 * Must be called with root->fs_commit_mutex held
228 */
229void btrfs_unpin_free_ino(struct btrfs_root *root)
230{
231 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
232 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
233 struct btrfs_free_space *info;
234 struct rb_node *n;
235 u64 count;
236
237 while (1) {
238 n = rb_first(rbroot);
239 if (!n)
240 break;
241
242 info = rb_entry(n, struct btrfs_free_space, offset_index);
243 BUG_ON(info->bitmap);
244
245 if (info->offset > root->cache_progress)
246 goto free;
247 else if (info->offset + info->bytes > root->cache_progress)
248 count = root->cache_progress - info->offset + 1;
249 else
250 count = info->bytes;
251
252 __btrfs_add_free_space(ctl, info->offset, count);
253free:
254 rb_erase(&info->offset_index, rbroot);
255 kfree(info);
256 }
257}
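
btrfs_unpin_free_ino() above distinguishes three cases against root->cache_progress: a pinned chunk fully below it is returned whole, one that straddles it is returned only up to the scanned point, and one fully above it is dropped. A small user-space sketch of that count arithmetic with made-up numbers:

#include <stdio.h>

/* mirrors the offset/bytes/count logic in btrfs_unpin_free_ino() */
static void unpin_one(unsigned long long progress,
		      unsigned long long offset,
		      unsigned long long bytes)
{
	unsigned long long count;

	if (offset > progress) {
		printf("[%llu, +%llu): dropped, caching has not reached it\n",
		       offset, bytes);
		return;
	}
	if (offset + bytes > progress)
		count = progress - offset + 1;	/* only the scanned part */
	else
		count = bytes;			/* the whole chunk */
	printf("[%llu, +%llu): return %llu ino(s) to the free tree\n",
	       offset, bytes, count);
}

int main(void)
{
	unsigned long long progress = 1000;	/* pretend root->cache_progress */

	unpin_one(progress, 300, 10);	/* entirely scanned: all 10 returned */
	unpin_one(progress, 995, 20);	/* straddles progress: 6 returned */
	unpin_one(progress, 2000, 5);	/* not scanned yet: dropped */
	return 0;
}
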
258
259#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
260#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
261
262/*
263 * The goal is to keep the memory used by the free_ino tree from
264 * exceeding the memory it would take if we used bitmaps only.
265 */
266static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
267{
268 struct btrfs_free_space *info;
269 struct rb_node *n;
270 int max_ino;
271 int max_bitmaps;
272
273 n = rb_last(&ctl->free_space_offset);
274 if (!n) {
275 ctl->extents_thresh = INIT_THRESHOLD;
276 return;
277 }
278 info = rb_entry(n, struct btrfs_free_space, offset_index);
279
280 /*
281 * Find the maximum inode number in the filesystem. Note we
282 * ignore the fact that this can be a bitmap, because we are
283	 * not doing a precise calculation.
284 */
285 max_ino = info->bytes - 1;
286
287 max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
288 if (max_bitmaps <= ctl->total_bitmaps) {
289 ctl->extents_thresh = 0;
290 return;
291 }
292
293 ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
294 PAGE_CACHE_SIZE / sizeof(*info);
295}
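
recalculate_thresholds() above caps the number of cached extent entries so that extents never cost more memory than covering the same inode-number range with bitmaps would. A sketch of the same arithmetic; the 4K page size, the ~64-byte entry size, and the max_ino/total_bitmaps figures are assumptions for illustration, not values taken from this patch:

#include <stdio.h>

#define PAGE_SIZE_ASSUMED	4096ULL	/* assumption: 4K pages */
#define FREE_SPACE_ENTRY_SIZE	64ULL	/* rough sizeof(struct btrfs_free_space), 64-bit */
#define INODES_PER_BITMAP	(PAGE_SIZE_ASSUMED * 8)

int main(void)
{
	unsigned long long init_thresh =
		((1024 * 32) / 2) / FREE_SPACE_ENTRY_SIZE;	/* the 16K INIT_THRESHOLD */
	unsigned long long max_ino = 1000000;	/* pretend highest inode number */
	unsigned long long total_bitmaps = 4;	/* bitmaps already in the ctl */
	unsigned long long max_bitmaps =
		(max_ino + INODES_PER_BITMAP - 1) / INODES_PER_BITMAP;	/* same ceiling as ALIGN()/IPB */
	unsigned long long thresh;

	printf("INIT_THRESHOLD ~ %llu extent entries (16K of RAM)\n", init_thresh);

	if (max_bitmaps <= total_bitmaps)
		thresh = 0;
	else
		thresh = (max_bitmaps - total_bitmaps) *
			 PAGE_SIZE_ASSUMED / FREE_SPACE_ENTRY_SIZE;

	printf("max_bitmaps = %llu, extents_thresh = %llu\n",
	       max_bitmaps, thresh);
	return 0;
}
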
296
297/*
298 * We don't fall back to a bitmap if we are below the extents
299 * threshold or this chunk of inode numbers is a big one.
300 */
301static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
302 struct btrfs_free_space *info)
303{
304 if (ctl->free_extents < ctl->extents_thresh ||
305 info->bytes > INODES_PER_BITMAP / 10)
306 return false;
307
308 return true;
309}
310
311static struct btrfs_free_space_op free_ino_op = {
312 .recalc_thresholds = recalculate_thresholds,
313 .use_bitmap = use_bitmap,
314};
315
316static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
317{
318}
319
320static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
321 struct btrfs_free_space *info)
322{
323 /*
324 * We always use extents for two reasons:
325 *
326	 * - The pinned tree is only used while the caching work is
327	 *   in progress.
328	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
329 */
330 return false;
331}
332
333static struct btrfs_free_space_op pinned_free_ino_op = {
334 .recalc_thresholds = pinned_recalc_thresholds,
335 .use_bitmap = pinned_use_bitmap,
336};
337
338void btrfs_init_free_ino_ctl(struct btrfs_root *root)
339{
340 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
341 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
342
343 spin_lock_init(&ctl->tree_lock);
344 ctl->unit = 1;
345 ctl->start = 0;
346 ctl->private = NULL;
347 ctl->op = &free_ino_op;
348
349 /*
350	 * Initially we allow the use of 16K of RAM to cache chunks of
351	 * inode numbers before we resort to bitmaps. This is somewhat
352	 * arbitrary, but it will be adjusted at runtime.
353 */
354 ctl->extents_thresh = INIT_THRESHOLD;
355
356 spin_lock_init(&pinned->tree_lock);
357 pinned->unit = 1;
358 pinned->start = 0;
359 pinned->private = NULL;
360 pinned->extents_thresh = 0;
361 pinned->op = &pinned_free_ino_op;
362}
363
364int btrfs_save_ino_cache(struct btrfs_root *root,
365 struct btrfs_trans_handle *trans)
366{
367 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
368 struct btrfs_path *path;
369 struct inode *inode;
370 u64 alloc_hint = 0;
371 int ret;
372 int prealloc;
373 bool retry = false;
374
375 path = btrfs_alloc_path();
376 if (!path)
377 return -ENOMEM;
378again:
379 inode = lookup_free_ino_inode(root, path);
380 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
381 ret = PTR_ERR(inode);
382 goto out;
383 }
384
385 if (IS_ERR(inode)) {
386 BUG_ON(retry);
387 retry = true;
388
389 ret = create_free_ino_inode(root, trans, path);
390 if (ret)
391 goto out;
392 goto again;
393 }
394
395 BTRFS_I(inode)->generation = 0;
396 ret = btrfs_update_inode(trans, root, inode);
397 WARN_ON(ret);
398
399 if (i_size_read(inode) > 0) {
400 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
401 if (ret)
402 goto out_put;
403 }
404
405 spin_lock(&root->cache_lock);
406 if (root->cached != BTRFS_CACHE_FINISHED) {
407 ret = -1;
408 spin_unlock(&root->cache_lock);
409 goto out_put;
410 }
411 spin_unlock(&root->cache_lock);
412
413 spin_lock(&ctl->tree_lock);
414 prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
415 prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
416 prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
417 spin_unlock(&ctl->tree_lock);
418
419 /* Just to make sure we have enough space */
420 prealloc += 8 * PAGE_CACHE_SIZE;
421
422 ret = btrfs_check_data_free_space(inode, prealloc);
423 if (ret)
424 goto out_put;
425
426 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
427 prealloc, prealloc, &alloc_hint);
428 if (ret)
429 goto out_put;
430 btrfs_free_reserved_data_space(inode, prealloc);
431
432out_put:
433 iput(inode);
434out:
435 if (ret == 0)
436 ret = btrfs_write_out_ino_cache(root, trans, path);
437
438 btrfs_free_path(path);
439 return ret;
440}
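
btrfs_save_ino_cache() above sizes the preallocation from the in-memory ctl before writing the cache out: one entry per cached extent, rounded up to a page, plus one page per bitmap and eight pages of slack. A sketch of that sizing with made-up counts (4K pages and a ~64-byte entry size are assumptions):

#include <stdio.h>

#define PAGE_SIZE_ASSUMED	4096ULL	/* assumption: 4K pages */
#define FREE_SPACE_ENTRY_SIZE	64ULL	/* rough sizeof(struct btrfs_free_space) */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long long free_extents = 100;	/* ctl->free_extents (made up) */
	unsigned long long total_bitmaps = 1;	/* ctl->total_bitmaps (made up) */
	unsigned long long prealloc;

	prealloc = FREE_SPACE_ENTRY_SIZE * free_extents;
	prealloc = ALIGN_UP(prealloc, PAGE_SIZE_ASSUMED);
	prealloc += total_bitmaps * PAGE_SIZE_ASSUMED;
	prealloc += 8 * PAGE_SIZE_ASSUMED;	/* slack, as in the function above */

	printf("prealloc = %llu bytes (%llu pages)\n",
	       prealloc, prealloc / PAGE_SIZE_ASSUMED);
	return 0;
}
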
441
442static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
24{ 443{
25 struct btrfs_path *path; 444 struct btrfs_path *path;
26 int ret; 445 int ret;
@@ -55,15 +474,14 @@ error:
55 return ret; 474 return ret;
56} 475}
57 476
58int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, 477int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
59 struct btrfs_root *root,
60 u64 dirid, u64 *objectid)
61{ 478{
62 int ret; 479 int ret;
63 mutex_lock(&root->objectid_mutex); 480 mutex_lock(&root->objectid_mutex);
64 481
65 if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { 482 if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
66 ret = btrfs_find_highest_inode(root, &root->highest_objectid); 483 ret = btrfs_find_highest_objectid(root,
484 &root->highest_objectid);
67 if (ret) 485 if (ret)
68 goto out; 486 goto out;
69 } 487 }
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
new file mode 100644
index 00000000000..ddb347bfee2
--- /dev/null
+++ b/fs/btrfs/inode-map.h
@@ -0,0 +1,13 @@
1#ifndef __BTRFS_INODE_MAP
2#define __BTRFS_INODE_MAP
3
4void btrfs_init_free_ino_ctl(struct btrfs_root *root);
5void btrfs_unpin_free_ino(struct btrfs_root *root);
6void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
7int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
8int btrfs_save_ino_cache(struct btrfs_root *root,
9 struct btrfs_trans_handle *trans);
10
11int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
12
13#endif
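
The header above is the whole of the new per-root ino-cache API: allocate a number on create with btrfs_find_free_ino(), hand it back with btrfs_return_ino() when a create fails or the inode is finally evicted, and persist the cache at commit time with btrfs_save_ino_cache(). A toy user-space model of that allocate/return contract — every toy_* name is invented, and the "cache" is just a counter plus a tiny free list rather than the real free-space ctl:

#include <stdio.h>

#define TOY_CACHE_SIZE 8

/* toy stand-in for a btrfs root's free-ino cache */
struct toy_root {
	unsigned long long next_objectid;	/* like highest_objectid + 1 */
	unsigned long long freed[TOY_CACHE_SIZE];
	int nr_freed;
};

/* rough analogue of btrfs_find_free_ino(): prefer recycled numbers */
static int toy_find_free_ino(struct toy_root *root, unsigned long long *objectid)
{
	if (root->nr_freed > 0) {
		*objectid = root->freed[--root->nr_freed];
		return 0;
	}
	*objectid = root->next_objectid++;
	return 0;
}

/* rough analogue of btrfs_return_ino(): hand a number back to the cache */
static void toy_return_ino(struct toy_root *root, unsigned long long objectid)
{
	if (root->nr_freed < TOY_CACHE_SIZE)
		root->freed[root->nr_freed++] = objectid;
}

int main(void)
{
	struct toy_root root = { .next_objectid = 256 };
	unsigned long long ino;

	toy_find_free_ino(&root, &ino);		/* create a file: gets 256 */
	printf("created inode %llu\n", ino);

	toy_return_ino(&root, ino);		/* evict/unlink: number recycled */
	toy_find_free_ino(&root, &ino);		/* next create reuses 256 */
	printf("created inode %llu (recycled)\n", ino);
	return 0;
}
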
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7cd8ab0ef04..01438e9ba2e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -51,6 +51,7 @@
51#include "compression.h" 51#include "compression.h"
52#include "locking.h" 52#include "locking.h"
53#include "free-space-cache.h" 53#include "free-space-cache.h"
54#include "inode-map.h"
54 55
55struct btrfs_iget_args { 56struct btrfs_iget_args {
56 u64 ino; 57 u64 ino;
@@ -138,7 +139,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
138 path->leave_spinning = 1; 139 path->leave_spinning = 1;
139 btrfs_set_trans_block_group(trans, inode); 140 btrfs_set_trans_block_group(trans, inode);
140 141
141 key.objectid = inode->i_ino; 142 key.objectid = btrfs_ino(inode);
142 key.offset = start; 143 key.offset = start;
143 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 144 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
144 datasize = btrfs_file_extent_calc_inline_size(cur_size); 145 datasize = btrfs_file_extent_calc_inline_size(cur_size);
@@ -745,6 +746,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
745 return alloc_hint; 746 return alloc_hint;
746} 747}
747 748
749static inline bool is_free_space_inode(struct btrfs_root *root,
750 struct inode *inode)
751{
752 if (root == root->fs_info->tree_root ||
753 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
754 return true;
755 return false;
756}
757
748/* 758/*
749 * when extent_io.c finds a delayed allocation range in the file, 759 * when extent_io.c finds a delayed allocation range in the file,
750 * the call backs end up in this code. The basic idea is to 760 * the call backs end up in this code. The basic idea is to
@@ -777,7 +787,7 @@ static noinline int cow_file_range(struct inode *inode,
777 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 787 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
778 int ret = 0; 788 int ret = 0;
779 789
780 BUG_ON(root == root->fs_info->tree_root); 790 BUG_ON(is_free_space_inode(root, inode));
781 trans = btrfs_join_transaction(root, 1); 791 trans = btrfs_join_transaction(root, 1);
782 BUG_ON(IS_ERR(trans)); 792 BUG_ON(IS_ERR(trans));
783 btrfs_set_trans_block_group(trans, inode); 793 btrfs_set_trans_block_group(trans, inode);
@@ -1049,29 +1059,31 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1049 int type; 1059 int type;
1050 int nocow; 1060 int nocow;
1051 int check_prev = 1; 1061 int check_prev = 1;
1052 bool nolock = false; 1062 bool nolock;
1063 u64 ino = btrfs_ino(inode);
1053 1064
1054 path = btrfs_alloc_path(); 1065 path = btrfs_alloc_path();
1055 BUG_ON(!path); 1066 BUG_ON(!path);
1056 if (root == root->fs_info->tree_root) { 1067
1057 nolock = true; 1068 nolock = is_free_space_inode(root, inode);
1069
1070 if (nolock)
1058 trans = btrfs_join_transaction_nolock(root, 1); 1071 trans = btrfs_join_transaction_nolock(root, 1);
1059 } else { 1072 else
1060 trans = btrfs_join_transaction(root, 1); 1073 trans = btrfs_join_transaction(root, 1);
1061 }
1062 BUG_ON(IS_ERR(trans)); 1074 BUG_ON(IS_ERR(trans));
1063 1075
1064 cow_start = (u64)-1; 1076 cow_start = (u64)-1;
1065 cur_offset = start; 1077 cur_offset = start;
1066 while (1) { 1078 while (1) {
1067 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 1079 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1068 cur_offset, 0); 1080 cur_offset, 0);
1069 BUG_ON(ret < 0); 1081 BUG_ON(ret < 0);
1070 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1082 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1071 leaf = path->nodes[0]; 1083 leaf = path->nodes[0];
1072 btrfs_item_key_to_cpu(leaf, &found_key, 1084 btrfs_item_key_to_cpu(leaf, &found_key,
1073 path->slots[0] - 1); 1085 path->slots[0] - 1);
1074 if (found_key.objectid == inode->i_ino && 1086 if (found_key.objectid == ino &&
1075 found_key.type == BTRFS_EXTENT_DATA_KEY) 1087 found_key.type == BTRFS_EXTENT_DATA_KEY)
1076 path->slots[0]--; 1088 path->slots[0]--;
1077 } 1089 }
@@ -1092,7 +1104,7 @@ next_slot:
1092 num_bytes = 0; 1104 num_bytes = 0;
1093 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 1105 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1094 1106
1095 if (found_key.objectid > inode->i_ino || 1107 if (found_key.objectid > ino ||
1096 found_key.type > BTRFS_EXTENT_DATA_KEY || 1108 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1097 found_key.offset > end) 1109 found_key.offset > end)
1098 break; 1110 break;
@@ -1127,7 +1139,7 @@ next_slot:
1127 goto out_check; 1139 goto out_check;
1128 if (btrfs_extent_readonly(root, disk_bytenr)) 1140 if (btrfs_extent_readonly(root, disk_bytenr))
1129 goto out_check; 1141 goto out_check;
1130 if (btrfs_cross_ref_exist(trans, root, inode->i_ino, 1142 if (btrfs_cross_ref_exist(trans, root, ino,
1131 found_key.offset - 1143 found_key.offset -
1132 extent_offset, disk_bytenr)) 1144 extent_offset, disk_bytenr))
1133 goto out_check; 1145 goto out_check;
@@ -1316,8 +1328,7 @@ static int btrfs_set_bit_hook(struct inode *inode,
1316 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1328 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1317 struct btrfs_root *root = BTRFS_I(inode)->root; 1329 struct btrfs_root *root = BTRFS_I(inode)->root;
1318 u64 len = state->end + 1 - state->start; 1330 u64 len = state->end + 1 - state->start;
1319 int do_list = (root->root_key.objectid != 1331 bool do_list = !is_free_space_inode(root, inode);
1320 BTRFS_ROOT_TREE_OBJECTID);
1321 1332
1322 if (*bits & EXTENT_FIRST_DELALLOC) 1333 if (*bits & EXTENT_FIRST_DELALLOC)
1323 *bits &= ~EXTENT_FIRST_DELALLOC; 1334 *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1350,8 +1361,7 @@ static int btrfs_clear_bit_hook(struct inode *inode,
1350 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { 1361 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1351 struct btrfs_root *root = BTRFS_I(inode)->root; 1362 struct btrfs_root *root = BTRFS_I(inode)->root;
1352 u64 len = state->end + 1 - state->start; 1363 u64 len = state->end + 1 - state->start;
1353 int do_list = (root->root_key.objectid != 1364 bool do_list = !is_free_space_inode(root, inode);
1354 BTRFS_ROOT_TREE_OBJECTID);
1355 1365
1356 if (*bits & EXTENT_FIRST_DELALLOC) 1366 if (*bits & EXTENT_FIRST_DELALLOC)
1357 *bits &= ~EXTENT_FIRST_DELALLOC; 1367 *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1458,7 +1468,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1458 1468
1459 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1469 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1460 1470
1461 if (root == root->fs_info->tree_root) 1471 if (is_free_space_inode(root, inode))
1462 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); 1472 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
1463 else 1473 else
1464 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1474 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1644,7 +1654,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1644 &hint, 0); 1654 &hint, 0);
1645 BUG_ON(ret); 1655 BUG_ON(ret);
1646 1656
1647 ins.objectid = inode->i_ino; 1657 ins.objectid = btrfs_ino(inode);
1648 ins.offset = file_pos; 1658 ins.offset = file_pos;
1649 ins.type = BTRFS_EXTENT_DATA_KEY; 1659 ins.type = BTRFS_EXTENT_DATA_KEY;
1650 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); 1660 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
@@ -1675,7 +1685,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1675 ins.type = BTRFS_EXTENT_ITEM_KEY; 1685 ins.type = BTRFS_EXTENT_ITEM_KEY;
1676 ret = btrfs_alloc_reserved_file_extent(trans, root, 1686 ret = btrfs_alloc_reserved_file_extent(trans, root,
1677 root->root_key.objectid, 1687 root->root_key.objectid,
1678 inode->i_ino, file_pos, &ins); 1688 btrfs_ino(inode), file_pos, &ins);
1679 BUG_ON(ret); 1689 BUG_ON(ret);
1680 btrfs_free_path(path); 1690 btrfs_free_path(path);
1681 1691
@@ -1701,7 +1711,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1701 struct extent_state *cached_state = NULL; 1711 struct extent_state *cached_state = NULL;
1702 int compress_type = 0; 1712 int compress_type = 0;
1703 int ret; 1713 int ret;
1704 bool nolock = false; 1714 bool nolock;
1705 1715
1706 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, 1716 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1707 end - start + 1); 1717 end - start + 1);
@@ -1709,7 +1719,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1709 return 0; 1719 return 0;
1710 BUG_ON(!ordered_extent); 1720 BUG_ON(!ordered_extent);
1711 1721
1712 nolock = (root == root->fs_info->tree_root); 1722 nolock = is_free_space_inode(root, inode);
1713 1723
1714 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1724 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1715 BUG_ON(!list_empty(&ordered_extent->list)); 1725 BUG_ON(!list_empty(&ordered_extent->list));
@@ -2005,8 +2015,9 @@ good:
2005 2015
2006zeroit: 2016zeroit:
2007 if (printk_ratelimit()) { 2017 if (printk_ratelimit()) {
2008 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " 2018 printk(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
2009 "private %llu\n", page->mapping->host->i_ino, 2019 "private %llu\n",
2020 (unsigned long long)btrfs_ino(page->mapping->host),
2010 (unsigned long long)start, csum, 2021 (unsigned long long)start, csum,
2011 (unsigned long long)private); 2022 (unsigned long long)private);
2012 } 2023 }
@@ -2244,7 +2255,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2244 2255
2245 /* insert an orphan item to track this unlinked/truncated file */ 2256 /* insert an orphan item to track this unlinked/truncated file */
2246 if (insert >= 1) { 2257 if (insert >= 1) {
2247 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); 2258 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2248 BUG_ON(ret); 2259 BUG_ON(ret);
2249 } 2260 }
2250 2261
@@ -2281,7 +2292,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2281 spin_unlock(&root->orphan_lock); 2292 spin_unlock(&root->orphan_lock);
2282 2293
2283 if (trans && delete_item) { 2294 if (trans && delete_item) {
2284 ret = btrfs_del_orphan_item(trans, root, inode->i_ino); 2295 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2285 BUG_ON(ret); 2296 BUG_ON(ret);
2286 } 2297 }
2287 2298
@@ -2543,7 +2554,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
2543 * try to precache a NULL acl entry for files that don't have 2554 * try to precache a NULL acl entry for files that don't have
2544 * any xattrs or acls 2555 * any xattrs or acls
2545 */ 2556 */
2546 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); 2557 maybe_acls = acls_after_inode_item(leaf, path->slots[0],
2558 btrfs_ino(inode));
2547 if (!maybe_acls) 2559 if (!maybe_acls)
2548 cache_no_acl(inode); 2560 cache_no_acl(inode);
2549 2561
@@ -2689,6 +2701,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2689 struct btrfs_dir_item *di; 2701 struct btrfs_dir_item *di;
2690 struct btrfs_key key; 2702 struct btrfs_key key;
2691 u64 index; 2703 u64 index;
2704 u64 ino = btrfs_ino(inode);
2705 u64 dir_ino = btrfs_ino(dir);
2692 2706
2693 path = btrfs_alloc_path(); 2707 path = btrfs_alloc_path();
2694 if (!path) { 2708 if (!path) {
@@ -2697,7 +2711,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2697 } 2711 }
2698 2712
2699 path->leave_spinning = 1; 2713 path->leave_spinning = 1;
2700 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 2714 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2701 name, name_len, -1); 2715 name, name_len, -1);
2702 if (IS_ERR(di)) { 2716 if (IS_ERR(di)) {
2703 ret = PTR_ERR(di); 2717 ret = PTR_ERR(di);
@@ -2714,17 +2728,16 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2714 goto err; 2728 goto err;
2715 btrfs_release_path(root, path); 2729 btrfs_release_path(root, path);
2716 2730
2717 ret = btrfs_del_inode_ref(trans, root, name, name_len, 2731 ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
2718 inode->i_ino, 2732 dir_ino, &index);
2719 dir->i_ino, &index);
2720 if (ret) { 2733 if (ret) {
2721 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2734 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2722 "inode %lu parent %lu\n", name_len, name, 2735 "inode %llu parent %llu\n", name_len, name,
2723 inode->i_ino, dir->i_ino); 2736 (unsigned long long)ino, (unsigned long long)dir_ino);
2724 goto err; 2737 goto err;
2725 } 2738 }
2726 2739
2727 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, 2740 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino,
2728 index, name, name_len, -1); 2741 index, name, name_len, -1);
2729 if (IS_ERR(di)) { 2742 if (IS_ERR(di)) {
2730 ret = PTR_ERR(di); 2743 ret = PTR_ERR(di);
@@ -2738,7 +2751,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2738 btrfs_release_path(root, path); 2751 btrfs_release_path(root, path);
2739 2752
2740 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2753 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2741 inode, dir->i_ino); 2754 inode, dir_ino);
2742 BUG_ON(ret != 0 && ret != -ENOENT); 2755 BUG_ON(ret != 0 && ret != -ENOENT);
2743 2756
2744 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2757 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
@@ -2816,12 +2829,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2816 int check_link = 1; 2829 int check_link = 1;
2817 int err = -ENOSPC; 2830 int err = -ENOSPC;
2818 int ret; 2831 int ret;
2832 u64 ino = btrfs_ino(inode);
2833 u64 dir_ino = btrfs_ino(dir);
2819 2834
2820 trans = btrfs_start_transaction(root, 10); 2835 trans = btrfs_start_transaction(root, 10);
2821 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) 2836 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2822 return trans; 2837 return trans;
2823 2838
2824 if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 2839 if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2825 return ERR_PTR(-ENOSPC); 2840 return ERR_PTR(-ENOSPC);
2826 2841
2827 /* check if there is someone else holds reference */ 2842 /* check if there is someone else holds reference */
@@ -2880,7 +2895,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2880 2895
2881 if (ret == 0 && S_ISREG(inode->i_mode)) { 2896 if (ret == 0 && S_ISREG(inode->i_mode)) {
2882 ret = btrfs_lookup_file_extent(trans, root, path, 2897 ret = btrfs_lookup_file_extent(trans, root, path,
2883 inode->i_ino, (u64)-1, 0); 2898 ino, (u64)-1, 0);
2884 if (ret < 0) { 2899 if (ret < 0) {
2885 err = ret; 2900 err = ret;
2886 goto out; 2901 goto out;
@@ -2896,7 +2911,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2896 goto out; 2911 goto out;
2897 } 2912 }
2898 2913
2899 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 2914 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2900 dentry->d_name.name, dentry->d_name.len, 0); 2915 dentry->d_name.name, dentry->d_name.len, 0);
2901 if (IS_ERR(di)) { 2916 if (IS_ERR(di)) {
2902 err = PTR_ERR(di); 2917 err = PTR_ERR(di);
@@ -2913,7 +2928,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2913 2928
2914 ref = btrfs_lookup_inode_ref(trans, root, path, 2929 ref = btrfs_lookup_inode_ref(trans, root, path,
2915 dentry->d_name.name, dentry->d_name.len, 2930 dentry->d_name.name, dentry->d_name.len,
2916 inode->i_ino, dir->i_ino, 0); 2931 ino, dir_ino, 0);
2917 if (IS_ERR(ref)) { 2932 if (IS_ERR(ref)) {
2918 err = PTR_ERR(ref); 2933 err = PTR_ERR(ref);
2919 goto out; 2934 goto out;
@@ -2924,7 +2939,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2924 index = btrfs_inode_ref_index(path->nodes[0], ref); 2939 index = btrfs_inode_ref_index(path->nodes[0], ref);
2925 btrfs_release_path(root, path); 2940 btrfs_release_path(root, path);
2926 2941
2927 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, 2942 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
2928 dentry->d_name.name, dentry->d_name.len, 0); 2943 dentry->d_name.name, dentry->d_name.len, 0);
2929 if (IS_ERR(di)) { 2944 if (IS_ERR(di)) {
2930 err = PTR_ERR(di); 2945 err = PTR_ERR(di);
@@ -2999,12 +3014,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2999 struct btrfs_key key; 3014 struct btrfs_key key;
3000 u64 index; 3015 u64 index;
3001 int ret; 3016 int ret;
3017 u64 dir_ino = btrfs_ino(dir);
3002 3018
3003 path = btrfs_alloc_path(); 3019 path = btrfs_alloc_path();
3004 if (!path) 3020 if (!path)
3005 return -ENOMEM; 3021 return -ENOMEM;
3006 3022
3007 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, 3023 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3008 name, name_len, -1); 3024 name, name_len, -1);
3009 BUG_ON(!di || IS_ERR(di)); 3025 BUG_ON(!di || IS_ERR(di));
3010 3026
@@ -3017,10 +3033,10 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3017 3033
3018 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3034 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
3019 objectid, root->root_key.objectid, 3035 objectid, root->root_key.objectid,
3020 dir->i_ino, &index, name, name_len); 3036 dir_ino, &index, name, name_len);
3021 if (ret < 0) { 3037 if (ret < 0) {
3022 BUG_ON(ret != -ENOENT); 3038 BUG_ON(ret != -ENOENT);
3023 di = btrfs_search_dir_index_item(root, path, dir->i_ino, 3039 di = btrfs_search_dir_index_item(root, path, dir_ino,
3024 name, name_len); 3040 name, name_len);
3025 BUG_ON(!di || IS_ERR(di)); 3041 BUG_ON(!di || IS_ERR(di));
3026 3042
@@ -3030,7 +3046,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3030 index = key.offset; 3046 index = key.offset;
3031 } 3047 }
3032 3048
3033 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, 3049 di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino,
3034 index, name, name_len, -1); 3050 index, name, name_len, -1);
3035 BUG_ON(!di || IS_ERR(di)); 3051 BUG_ON(!di || IS_ERR(di));
3036 3052
@@ -3059,7 +3075,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3059 unsigned long nr = 0; 3075 unsigned long nr = 0;
3060 3076
3061 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || 3077 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
3062 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 3078 btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
3063 return -ENOTEMPTY; 3079 return -ENOTEMPTY;
3064 3080
3065 trans = __unlink_start_trans(dir, dentry); 3081 trans = __unlink_start_trans(dir, dentry);
@@ -3068,7 +3084,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3068 3084
3069 btrfs_set_trans_block_group(trans, dir); 3085 btrfs_set_trans_block_group(trans, dir);
3070 3086
3071 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3087 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3072 err = btrfs_unlink_subvol(trans, root, dir, 3088 err = btrfs_unlink_subvol(trans, root, dir,
3073 BTRFS_I(inode)->location.objectid, 3089 BTRFS_I(inode)->location.objectid,
3074 dentry->d_name.name, 3090 dentry->d_name.name,
@@ -3300,6 +3316,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3300 int encoding; 3316 int encoding;
3301 int ret; 3317 int ret;
3302 int err = 0; 3318 int err = 0;
3319 u64 ino = btrfs_ino(inode);
3303 3320
3304 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); 3321 BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
3305 3322
@@ -3310,7 +3327,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3310 BUG_ON(!path); 3327 BUG_ON(!path);
3311 path->reada = -1; 3328 path->reada = -1;
3312 3329
3313 key.objectid = inode->i_ino; 3330 key.objectid = ino;
3314 key.offset = (u64)-1; 3331 key.offset = (u64)-1;
3315 key.type = (u8)-1; 3332 key.type = (u8)-1;
3316 3333
@@ -3338,7 +3355,7 @@ search_again:
3338 found_type = btrfs_key_type(&found_key); 3355 found_type = btrfs_key_type(&found_key);
3339 encoding = 0; 3356 encoding = 0;
3340 3357
3341 if (found_key.objectid != inode->i_ino) 3358 if (found_key.objectid != ino)
3342 break; 3359 break;
3343 3360
3344 if (found_type < min_type) 3361 if (found_type < min_type)
@@ -3457,7 +3474,7 @@ delete:
3457 ret = btrfs_free_extent(trans, root, extent_start, 3474 ret = btrfs_free_extent(trans, root, extent_start,
3458 extent_num_bytes, 0, 3475 extent_num_bytes, 0,
3459 btrfs_header_owner(leaf), 3476 btrfs_header_owner(leaf),
3460 inode->i_ino, extent_offset); 3477 ino, extent_offset);
3461 BUG_ON(ret); 3478 BUG_ON(ret);
3462 } 3479 }
3463 3480
@@ -3466,7 +3483,9 @@ delete:
3466 3483
3467 if (path->slots[0] == 0 || 3484 if (path->slots[0] == 0 ||
3468 path->slots[0] != pending_del_slot) { 3485 path->slots[0] != pending_del_slot) {
3469 if (root->ref_cows) { 3486 if (root->ref_cows &&
3487 BTRFS_I(inode)->location.objectid !=
3488 BTRFS_FREE_INO_OBJECTID) {
3470 err = -EAGAIN; 3489 err = -EAGAIN;
3471 goto out; 3490 goto out;
3472 } 3491 }
@@ -3656,7 +3675,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3656 break; 3675 break;
3657 3676
3658 err = btrfs_insert_file_extent(trans, root, 3677 err = btrfs_insert_file_extent(trans, root,
3659 inode->i_ino, cur_offset, 0, 3678 btrfs_ino(inode), cur_offset, 0,
3660 0, hole_size, 0, hole_size, 3679 0, hole_size, 0, hole_size,
3661 0, 0, 0); 3680 0, 0, 0);
3662 if (err) 3681 if (err)
@@ -3758,7 +3777,7 @@ void btrfs_evict_inode(struct inode *inode)
3758 3777
3759 truncate_inode_pages(&inode->i_data, 0); 3778 truncate_inode_pages(&inode->i_data, 0);
3760 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || 3779 if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
3761 root == root->fs_info->tree_root)) 3780 is_free_space_inode(root, inode)))
3762 goto no_delete; 3781 goto no_delete;
3763 3782
3764 if (is_bad_inode(inode)) { 3783 if (is_bad_inode(inode)) {
@@ -3811,6 +3830,10 @@ void btrfs_evict_inode(struct inode *inode)
3811 BUG_ON(ret); 3830 BUG_ON(ret);
3812 } 3831 }
3813 3832
3833 if (!(root == root->fs_info->tree_root ||
3834 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
3835 btrfs_return_ino(root, btrfs_ino(inode));
3836
3814 nr = trans->blocks_used; 3837 nr = trans->blocks_used;
3815 btrfs_end_transaction(trans, root); 3838 btrfs_end_transaction(trans, root);
3816 btrfs_btree_balance_dirty(root, nr); 3839 btrfs_btree_balance_dirty(root, nr);
@@ -3836,7 +3859,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3836 path = btrfs_alloc_path(); 3859 path = btrfs_alloc_path();
3837 BUG_ON(!path); 3860 BUG_ON(!path);
3838 3861
3839 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, 3862 di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
3840 namelen, 0); 3863 namelen, 0);
3841 if (IS_ERR(di)) 3864 if (IS_ERR(di))
3842 ret = PTR_ERR(di); 3865 ret = PTR_ERR(di);
@@ -3889,7 +3912,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
3889 3912
3890 leaf = path->nodes[0]; 3913 leaf = path->nodes[0];
3891 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 3914 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3892 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || 3915 if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
3893 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) 3916 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3894 goto out; 3917 goto out;
3895 3918
@@ -3928,6 +3951,7 @@ static void inode_tree_add(struct inode *inode)
3928 struct btrfs_inode *entry; 3951 struct btrfs_inode *entry;
3929 struct rb_node **p; 3952 struct rb_node **p;
3930 struct rb_node *parent; 3953 struct rb_node *parent;
3954 u64 ino = btrfs_ino(inode);
3931again: 3955again:
3932 p = &root->inode_tree.rb_node; 3956 p = &root->inode_tree.rb_node;
3933 parent = NULL; 3957 parent = NULL;
@@ -3940,9 +3964,9 @@ again:
3940 parent = *p; 3964 parent = *p;
3941 entry = rb_entry(parent, struct btrfs_inode, rb_node); 3965 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3942 3966
3943 if (inode->i_ino < entry->vfs_inode.i_ino) 3967 if (ino < btrfs_ino(&entry->vfs_inode))
3944 p = &parent->rb_left; 3968 p = &parent->rb_left;
3945 else if (inode->i_ino > entry->vfs_inode.i_ino) 3969 else if (ino > btrfs_ino(&entry->vfs_inode))
3946 p = &parent->rb_right; 3970 p = &parent->rb_right;
3947 else { 3971 else {
3948 WARN_ON(!(entry->vfs_inode.i_state & 3972 WARN_ON(!(entry->vfs_inode.i_state &
@@ -4006,9 +4030,9 @@ again:
4006 prev = node; 4030 prev = node;
4007 entry = rb_entry(node, struct btrfs_inode, rb_node); 4031 entry = rb_entry(node, struct btrfs_inode, rb_node);
4008 4032
4009 if (objectid < entry->vfs_inode.i_ino) 4033 if (objectid < btrfs_ino(&entry->vfs_inode))
4010 node = node->rb_left; 4034 node = node->rb_left;
4011 else if (objectid > entry->vfs_inode.i_ino) 4035 else if (objectid > btrfs_ino(&entry->vfs_inode))
4012 node = node->rb_right; 4036 node = node->rb_right;
4013 else 4037 else
4014 break; 4038 break;
@@ -4016,7 +4040,7 @@ again:
4016 if (!node) { 4040 if (!node) {
4017 while (prev) { 4041 while (prev) {
4018 entry = rb_entry(prev, struct btrfs_inode, rb_node); 4042 entry = rb_entry(prev, struct btrfs_inode, rb_node);
4019 if (objectid <= entry->vfs_inode.i_ino) { 4043 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
4020 node = prev; 4044 node = prev;
4021 break; 4045 break;
4022 } 4046 }
@@ -4025,7 +4049,7 @@ again:
4025 } 4049 }
4026 while (node) { 4050 while (node) {
4027 entry = rb_entry(node, struct btrfs_inode, rb_node); 4051 entry = rb_entry(node, struct btrfs_inode, rb_node);
4028 objectid = entry->vfs_inode.i_ino + 1; 4052 objectid = btrfs_ino(&entry->vfs_inode) + 1;
4029 inode = igrab(&entry->vfs_inode); 4053 inode = igrab(&entry->vfs_inode);
4030 if (inode) { 4054 if (inode) {
4031 spin_unlock(&root->inode_lock); 4055 spin_unlock(&root->inode_lock);
@@ -4063,7 +4087,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
4063static int btrfs_find_actor(struct inode *inode, void *opaque) 4087static int btrfs_find_actor(struct inode *inode, void *opaque)
4064{ 4088{
4065 struct btrfs_iget_args *args = opaque; 4089 struct btrfs_iget_args *args = opaque;
4066 return args->ino == inode->i_ino && 4090 return args->ino == btrfs_ino(inode) &&
4067 args->root == BTRFS_I(inode)->root; 4091 args->root == BTRFS_I(inode)->root;
4068} 4092}
4069 4093
@@ -4241,9 +4265,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4241 4265
4242 /* special case for "." */ 4266 /* special case for "." */
4243 if (filp->f_pos == 0) { 4267 if (filp->f_pos == 0) {
4244 over = filldir(dirent, ".", 1, 4268 over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
4245 1, inode->i_ino,
4246 DT_DIR);
4247 if (over) 4269 if (over)
4248 return 0; 4270 return 0;
4249 filp->f_pos = 1; 4271 filp->f_pos = 1;
@@ -4262,7 +4284,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4262 4284
4263 btrfs_set_key_type(&key, key_type); 4285 btrfs_set_key_type(&key, key_type);
4264 key.offset = filp->f_pos; 4286 key.offset = filp->f_pos;
4265 key.objectid = inode->i_ino; 4287 key.objectid = btrfs_ino(inode);
4266 4288
4267 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4289 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4268 if (ret < 0) 4290 if (ret < 0)
@@ -4372,7 +4394,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4372 return 0; 4394 return 0;
4373 4395
4374 smp_mb(); 4396 smp_mb();
4375 nolock = (root->fs_info->closing && root == root->fs_info->tree_root); 4397 if (root->fs_info->closing && is_free_space_inode(root, inode))
4398 nolock = true;
4376 4399
4377 if (wbc->sync_mode == WB_SYNC_ALL) { 4400 if (wbc->sync_mode == WB_SYNC_ALL) {
4378 if (nolock) 4401 if (nolock)
@@ -4417,8 +4440,9 @@ void btrfs_dirty_inode(struct inode *inode)
4417 if (IS_ERR(trans)) { 4440 if (IS_ERR(trans)) {
4418 if (printk_ratelimit()) { 4441 if (printk_ratelimit()) {
4419 printk(KERN_ERR "btrfs: fail to " 4442 printk(KERN_ERR "btrfs: fail to "
4420 "dirty inode %lu error %ld\n", 4443 "dirty inode %llu error %ld\n",
4421 inode->i_ino, PTR_ERR(trans)); 4444 (unsigned long long)btrfs_ino(inode),
4445 PTR_ERR(trans));
4422 } 4446 }
4423 return; 4447 return;
4424 } 4448 }
@@ -4428,8 +4452,9 @@ void btrfs_dirty_inode(struct inode *inode)
4428 if (ret) { 4452 if (ret) {
4429 if (printk_ratelimit()) { 4453 if (printk_ratelimit()) {
4430 printk(KERN_ERR "btrfs: fail to " 4454 printk(KERN_ERR "btrfs: fail to "
4431 "dirty inode %lu error %d\n", 4455 "dirty inode %llu error %d\n",
4432 inode->i_ino, ret); 4456 (unsigned long long)btrfs_ino(inode),
4457 ret);
4433 } 4458 }
4434 } 4459 }
4435 } 4460 }
@@ -4449,7 +4474,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
4449 struct extent_buffer *leaf; 4474 struct extent_buffer *leaf;
4450 int ret; 4475 int ret;
4451 4476
4452 key.objectid = inode->i_ino; 4477 key.objectid = btrfs_ino(inode);
4453 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); 4478 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
4454 key.offset = (u64)-1; 4479 key.offset = (u64)-1;
4455 4480
@@ -4481,7 +4506,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
4481 leaf = path->nodes[0]; 4506 leaf = path->nodes[0];
4482 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 4507 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4483 4508
4484 if (found_key.objectid != inode->i_ino || 4509 if (found_key.objectid != btrfs_ino(inode) ||
4485 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { 4510 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
4486 BTRFS_I(inode)->index_cnt = 2; 4511 BTRFS_I(inode)->index_cnt = 2;
4487 goto out; 4512 goto out;
@@ -4540,6 +4565,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4540 return ERR_PTR(-ENOMEM); 4565 return ERR_PTR(-ENOMEM);
4541 } 4566 }
4542 4567
4568 /*
 4569 	 * we have to initialize this early, so that we can reclaim the
 4570 	 * inode number if we fail later in this function.
4571 */
4572 inode->i_ino = objectid;
4573
4543 if (dir) { 4574 if (dir) {
4544 trace_btrfs_inode_request(dir); 4575 trace_btrfs_inode_request(dir);
4545 4576
@@ -4585,7 +4616,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4585 goto fail; 4616 goto fail;
4586 4617
4587 inode_init_owner(inode, dir, mode); 4618 inode_init_owner(inode, dir, mode);
4588 inode->i_ino = objectid;
4589 inode_set_bytes(inode, 0); 4619 inode_set_bytes(inode, 0);
4590 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 4620 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
4591 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4621 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -4649,29 +4679,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
4649 int ret = 0; 4679 int ret = 0;
4650 struct btrfs_key key; 4680 struct btrfs_key key;
4651 struct btrfs_root *root = BTRFS_I(parent_inode)->root; 4681 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4682 u64 ino = btrfs_ino(inode);
4683 u64 parent_ino = btrfs_ino(parent_inode);
4652 4684
4653 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 4685 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4654 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); 4686 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4655 } else { 4687 } else {
4656 key.objectid = inode->i_ino; 4688 key.objectid = ino;
4657 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); 4689 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4658 key.offset = 0; 4690 key.offset = 0;
4659 } 4691 }
4660 4692
4661 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 4693 if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
4662 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 4694 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4663 key.objectid, root->root_key.objectid, 4695 key.objectid, root->root_key.objectid,
4664 parent_inode->i_ino, 4696 parent_ino, index, name, name_len);
4665 index, name, name_len);
4666 } else if (add_backref) { 4697 } else if (add_backref) {
4667 ret = btrfs_insert_inode_ref(trans, root, 4698 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
4668 name, name_len, inode->i_ino, 4699 parent_ino, index);
4669 parent_inode->i_ino, index);
4670 } 4700 }
4671 4701
4672 if (ret == 0) { 4702 if (ret == 0) {
4673 ret = btrfs_insert_dir_item(trans, root, name, name_len, 4703 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4674 parent_inode->i_ino, &key, 4704 parent_ino, &key,
4675 btrfs_inode_type(inode), index); 4705 btrfs_inode_type(inode), index);
4676 BUG_ON(ret); 4706 BUG_ON(ret);
4677 4707
@@ -4714,10 +4744,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4714 if (!new_valid_dev(rdev)) 4744 if (!new_valid_dev(rdev))
4715 return -EINVAL; 4745 return -EINVAL;
4716 4746
4717 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4718 if (err)
4719 return err;
4720
4721 /* 4747 /*
4722 * 2 for inode item and ref 4748 * 2 for inode item and ref
4723 * 2 for dir items 4749 * 2 for dir items
@@ -4729,8 +4755,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4729 4755
4730 btrfs_set_trans_block_group(trans, dir); 4756 btrfs_set_trans_block_group(trans, dir);
4731 4757
4758 err = btrfs_find_free_ino(root, &objectid);
4759 if (err)
4760 goto out_unlock;
4761
4732 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4762 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4733 dentry->d_name.len, dir->i_ino, objectid, 4763 dentry->d_name.len, btrfs_ino(dir), objectid,
4734 BTRFS_I(dir)->block_group, mode, &index); 4764 BTRFS_I(dir)->block_group, mode, &index);
4735 if (IS_ERR(inode)) { 4765 if (IS_ERR(inode)) {
4736 err = PTR_ERR(inode); 4766 err = PTR_ERR(inode);
@@ -4777,9 +4807,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4777 u64 objectid; 4807 u64 objectid;
4778 u64 index = 0; 4808 u64 index = 0;
4779 4809
4780 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4781 if (err)
4782 return err;
4783 /* 4810 /*
4784 * 2 for inode item and ref 4811 * 2 for inode item and ref
4785 * 2 for dir items 4812 * 2 for dir items
@@ -4791,8 +4818,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4791 4818
4792 btrfs_set_trans_block_group(trans, dir); 4819 btrfs_set_trans_block_group(trans, dir);
4793 4820
4821 err = btrfs_find_free_ino(root, &objectid);
4822 if (err)
4823 goto out_unlock;
4824
4794 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4825 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4795 dentry->d_name.len, dir->i_ino, objectid, 4826 dentry->d_name.len, btrfs_ino(dir), objectid,
4796 BTRFS_I(dir)->block_group, mode, &index); 4827 BTRFS_I(dir)->block_group, mode, &index);
4797 if (IS_ERR(inode)) { 4828 if (IS_ERR(inode)) {
4798 err = PTR_ERR(inode); 4829 err = PTR_ERR(inode);
@@ -4903,10 +4934,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4903 u64 index = 0; 4934 u64 index = 0;
4904 unsigned long nr = 1; 4935 unsigned long nr = 1;
4905 4936
4906 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4907 if (err)
4908 return err;
4909
4910 /* 4937 /*
4911 * 2 items for inode and ref 4938 * 2 items for inode and ref
4912 * 2 items for dir items 4939 * 2 items for dir items
@@ -4917,8 +4944,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4917 return PTR_ERR(trans); 4944 return PTR_ERR(trans);
4918 btrfs_set_trans_block_group(trans, dir); 4945 btrfs_set_trans_block_group(trans, dir);
4919 4946
4947 err = btrfs_find_free_ino(root, &objectid);
4948 if (err)
4949 goto out_fail;
4950
4920 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4951 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4921 dentry->d_name.len, dir->i_ino, objectid, 4952 dentry->d_name.len, btrfs_ino(dir), objectid,
4922 BTRFS_I(dir)->block_group, S_IFDIR | mode, 4953 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4923 &index); 4954 &index);
4924 if (IS_ERR(inode)) { 4955 if (IS_ERR(inode)) {
@@ -5041,7 +5072,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
5041 u64 bytenr; 5072 u64 bytenr;
5042 u64 extent_start = 0; 5073 u64 extent_start = 0;
5043 u64 extent_end = 0; 5074 u64 extent_end = 0;
5044 u64 objectid = inode->i_ino; 5075 u64 objectid = btrfs_ino(inode);
5045 u32 found_type; 5076 u32 found_type;
5046 struct btrfs_path *path = NULL; 5077 struct btrfs_path *path = NULL;
5047 struct btrfs_root *root = BTRFS_I(inode)->root; 5078 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5549,7 +5580,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5549 if (!path) 5580 if (!path)
5550 return -ENOMEM; 5581 return -ENOMEM;
5551 5582
5552 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 5583 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
5553 offset, 0); 5584 offset, 0);
5554 if (ret < 0) 5585 if (ret < 0)
5555 goto out; 5586 goto out;
@@ -5566,7 +5597,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5566 ret = 0; 5597 ret = 0;
5567 leaf = path->nodes[0]; 5598 leaf = path->nodes[0];
5568 btrfs_item_key_to_cpu(leaf, &key, slot); 5599 btrfs_item_key_to_cpu(leaf, &key, slot);
5569 if (key.objectid != inode->i_ino || 5600 if (key.objectid != btrfs_ino(inode) ||
5570 key.type != BTRFS_EXTENT_DATA_KEY) { 5601 key.type != BTRFS_EXTENT_DATA_KEY) {
5571 /* not our file or wrong item type, must cow */ 5602 /* not our file or wrong item type, must cow */
5572 goto out; 5603 goto out;
@@ -5600,7 +5631,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5600 * look for other files referencing this extent, if we 5631 * look for other files referencing this extent, if we
5601 * find any we must cow 5632 * find any we must cow
5602 */ 5633 */
5603 if (btrfs_cross_ref_exist(trans, root, inode->i_ino, 5634 if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
5604 key.offset - backref_offset, disk_bytenr)) 5635 key.offset - backref_offset, disk_bytenr))
5605 goto out; 5636 goto out;
5606 5637
@@ -5790,9 +5821,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5790 5821
5791 flush_dcache_page(bvec->bv_page); 5822 flush_dcache_page(bvec->bv_page);
5792 if (csum != *private) { 5823 if (csum != *private) {
5793 printk(KERN_ERR "btrfs csum failed ino %lu off" 5824 printk(KERN_ERR "btrfs csum failed ino %llu off"
5794 " %llu csum %u private %u\n", 5825 " %llu csum %u private %u\n",
5795 inode->i_ino, (unsigned long long)start, 5826 (unsigned long long)btrfs_ino(inode),
5827 (unsigned long long)start,
5796 csum, *private); 5828 csum, *private);
5797 err = -EIO; 5829 err = -EIO;
5798 } 5830 }
@@ -5939,9 +5971,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
5939 struct btrfs_dio_private *dip = bio->bi_private; 5971 struct btrfs_dio_private *dip = bio->bi_private;
5940 5972
5941 if (err) { 5973 if (err) {
5942 printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " 5974 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
5943 "sector %#Lx len %u err no %d\n", 5975 "sector %#Lx len %u err no %d\n",
5944 dip->inode->i_ino, bio->bi_rw, 5976 (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
5945 (unsigned long long)bio->bi_sector, bio->bi_size, err); 5977 (unsigned long long)bio->bi_sector, bio->bi_size, err);
5946 dip->errors = 1; 5978 dip->errors = 1;
5947 5979
@@ -6851,8 +6883,8 @@ void btrfs_destroy_inode(struct inode *inode)
6851 6883
6852 spin_lock(&root->orphan_lock); 6884 spin_lock(&root->orphan_lock);
6853 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6885 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6854 printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", 6886 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
6855 inode->i_ino); 6887 (unsigned long long)btrfs_ino(inode));
6856 list_del_init(&BTRFS_I(inode)->i_orphan); 6888 list_del_init(&BTRFS_I(inode)->i_orphan);
6857 } 6889 }
6858 spin_unlock(&root->orphan_lock); 6890 spin_unlock(&root->orphan_lock);
@@ -6882,7 +6914,7 @@ int btrfs_drop_inode(struct inode *inode)
6882 struct btrfs_root *root = BTRFS_I(inode)->root; 6914 struct btrfs_root *root = BTRFS_I(inode)->root;
6883 6915
6884 if (btrfs_root_refs(&root->root_item) == 0 && 6916 if (btrfs_root_refs(&root->root_item) == 0 &&
6885 root != root->fs_info->tree_root) 6917 !is_free_space_inode(root, inode))
6886 return 1; 6918 return 1;
6887 else 6919 else
6888 return generic_drop_inode(inode); 6920 return generic_drop_inode(inode);
@@ -6991,16 +7023,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6991 u64 index = 0; 7023 u64 index = 0;
6992 u64 root_objectid; 7024 u64 root_objectid;
6993 int ret; 7025 int ret;
7026 u64 old_ino = btrfs_ino(old_inode);
6994 7027
6995 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 7028 if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
6996 return -EPERM; 7029 return -EPERM;
6997 7030
6998 /* we only allow rename subvolume link between subvolumes */ 7031 /* we only allow rename subvolume link between subvolumes */
6999 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 7032 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
7000 return -EXDEV; 7033 return -EXDEV;
7001 7034
7002 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || 7035 if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
7003 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) 7036 (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
7004 return -ENOTEMPTY; 7037 return -ENOTEMPTY;
7005 7038
7006 if (S_ISDIR(old_inode->i_mode) && new_inode && 7039 if (S_ISDIR(old_inode->i_mode) && new_inode &&
@@ -7016,7 +7049,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7016 filemap_flush(old_inode->i_mapping); 7049 filemap_flush(old_inode->i_mapping);
7017 7050
7018 /* close the racy window with snapshot create/destroy ioctl */ 7051 /* close the racy window with snapshot create/destroy ioctl */
7019 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 7052 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7020 down_read(&root->fs_info->subvol_sem); 7053 down_read(&root->fs_info->subvol_sem);
7021 /* 7054 /*
7022 * We want to reserve the absolute worst case amount of items. So if 7055 * We want to reserve the absolute worst case amount of items. So if
@@ -7041,15 +7074,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7041 if (ret) 7074 if (ret)
7042 goto out_fail; 7075 goto out_fail;
7043 7076
7044 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 7077 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7045 /* force full log commit if subvolume involved. */ 7078 /* force full log commit if subvolume involved. */
7046 root->fs_info->last_trans_log_full_commit = trans->transid; 7079 root->fs_info->last_trans_log_full_commit = trans->transid;
7047 } else { 7080 } else {
7048 ret = btrfs_insert_inode_ref(trans, dest, 7081 ret = btrfs_insert_inode_ref(trans, dest,
7049 new_dentry->d_name.name, 7082 new_dentry->d_name.name,
7050 new_dentry->d_name.len, 7083 new_dentry->d_name.len,
7051 old_inode->i_ino, 7084 old_ino,
7052 new_dir->i_ino, index); 7085 btrfs_ino(new_dir), index);
7053 if (ret) 7086 if (ret)
7054 goto out_fail; 7087 goto out_fail;
7055 /* 7088 /*
@@ -7065,10 +7098,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7065 * make sure the inode gets flushed if it is replacing 7098 * make sure the inode gets flushed if it is replacing
7066 * something. 7099 * something.
7067 */ 7100 */
7068 if (new_inode && new_inode->i_size && 7101 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7069 old_inode && S_ISREG(old_inode->i_mode)) {
7070 btrfs_add_ordered_operation(trans, root, old_inode); 7102 btrfs_add_ordered_operation(trans, root, old_inode);
7071 }
7072 7103
7073 old_dir->i_ctime = old_dir->i_mtime = ctime; 7104 old_dir->i_ctime = old_dir->i_mtime = ctime;
7074 new_dir->i_ctime = new_dir->i_mtime = ctime; 7105 new_dir->i_ctime = new_dir->i_mtime = ctime;
@@ -7077,7 +7108,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7077 if (old_dentry->d_parent != new_dentry->d_parent) 7108 if (old_dentry->d_parent != new_dentry->d_parent)
7078 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); 7109 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
7079 7110
7080 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { 7111 if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
7081 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; 7112 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
7082 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, 7113 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
7083 old_dentry->d_name.name, 7114 old_dentry->d_name.name,
@@ -7094,7 +7125,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7094 7125
7095 if (new_inode) { 7126 if (new_inode) {
7096 new_inode->i_ctime = CURRENT_TIME; 7127 new_inode->i_ctime = CURRENT_TIME;
7097 if (unlikely(new_inode->i_ino == 7128 if (unlikely(btrfs_ino(new_inode) ==
7098 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 7129 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
7099 root_objectid = BTRFS_I(new_inode)->location.objectid; 7130 root_objectid = BTRFS_I(new_inode)->location.objectid;
7100 ret = btrfs_unlink_subvol(trans, dest, new_dir, 7131 ret = btrfs_unlink_subvol(trans, dest, new_dir,
@@ -7122,7 +7153,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7122 new_dentry->d_name.len, 0, index); 7153 new_dentry->d_name.len, 0, index);
7123 BUG_ON(ret); 7154 BUG_ON(ret);
7124 7155
7125 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { 7156 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7126 struct dentry *parent = dget_parent(new_dentry); 7157 struct dentry *parent = dget_parent(new_dentry);
7127 btrfs_log_new_name(trans, old_inode, old_dir, parent); 7158 btrfs_log_new_name(trans, old_inode, old_dir, parent);
7128 dput(parent); 7159 dput(parent);
@@ -7131,7 +7162,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7131out_fail: 7162out_fail:
7132 btrfs_end_transaction_throttle(trans, root); 7163 btrfs_end_transaction_throttle(trans, root);
7133out_notrans: 7164out_notrans:
7134 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 7165 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
7135 up_read(&root->fs_info->subvol_sem); 7166 up_read(&root->fs_info->subvol_sem);
7136 7167
7137 return ret; 7168 return ret;
@@ -7260,9 +7291,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7260 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 7291 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
7261 return -ENAMETOOLONG; 7292 return -ENAMETOOLONG;
7262 7293
7263 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
7264 if (err)
7265 return err;
7266 /* 7294 /*
7267 * 2 items for inode item and ref 7295 * 2 items for inode item and ref
7268 * 2 items for dir items 7296 * 2 items for dir items
@@ -7274,8 +7302,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7274 7302
7275 btrfs_set_trans_block_group(trans, dir); 7303 btrfs_set_trans_block_group(trans, dir);
7276 7304
7305 err = btrfs_find_free_ino(root, &objectid);
7306 if (err)
7307 goto out_unlock;
7308
7277 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7309 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7278 dentry->d_name.len, dir->i_ino, objectid, 7310 dentry->d_name.len, btrfs_ino(dir), objectid,
7279 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7311 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
7280 &index); 7312 &index);
7281 if (IS_ERR(inode)) { 7313 if (IS_ERR(inode)) {
@@ -7307,7 +7339,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7307 7339
7308 path = btrfs_alloc_path(); 7340 path = btrfs_alloc_path();
7309 BUG_ON(!path); 7341 BUG_ON(!path);
7310 key.objectid = inode->i_ino; 7342 key.objectid = btrfs_ino(inode);
7311 key.offset = 0; 7343 key.offset = 0;
7312 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); 7344 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
7313 datasize = btrfs_file_extent_calc_inline_size(name_len); 7345 datasize = btrfs_file_extent_calc_inline_size(name_len);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2616f7ed479..bc5c2b01bf3 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -50,6 +50,7 @@
50#include "print-tree.h" 50#include "print-tree.h"
51#include "volumes.h" 51#include "volumes.h"
52#include "locking.h" 52#include "locking.h"
53#include "inode-map.h"
53 54
54/* Mask out flags that are inappropriate for the given type of inode. */ 55/* Mask out flags that are inappropriate for the given type of inode. */
55static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) 56static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -329,8 +330,7 @@ static noinline int create_subvol(struct btrfs_root *root,
329 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; 330 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
330 u64 index = 0; 331 u64 index = 0;
331 332
332 ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, 333 ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
333 0, &objectid);
334 if (ret) { 334 if (ret) {
335 dput(parent); 335 dput(parent);
336 return ret; 336 return ret;
@@ -422,7 +422,7 @@ static noinline int create_subvol(struct btrfs_root *root,
422 BUG_ON(ret); 422 BUG_ON(ret);
423 423
424 ret = btrfs_insert_dir_item(trans, root, 424 ret = btrfs_insert_dir_item(trans, root,
425 name, namelen, dir->i_ino, &key, 425 name, namelen, btrfs_ino(dir), &key,
426 BTRFS_FT_DIR, index); 426 BTRFS_FT_DIR, index);
427 if (ret) 427 if (ret)
428 goto fail; 428 goto fail;
@@ -433,7 +433,7 @@ static noinline int create_subvol(struct btrfs_root *root,
433 433
434 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, 434 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
435 objectid, root->root_key.objectid, 435 objectid, root->root_key.objectid,
436 dir->i_ino, index, name, namelen); 436 btrfs_ino(dir), index, name, namelen);
437 437
438 BUG_ON(ret); 438 BUG_ON(ret);
439 439
@@ -1129,7 +1129,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
1129 int ret = 0; 1129 int ret = 0;
1130 u64 flags = 0; 1130 u64 flags = 0;
1131 1131
1132 if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) 1132 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
1133 return -EINVAL; 1133 return -EINVAL;
1134 1134
1135 down_read(&root->fs_info->subvol_sem); 1135 down_read(&root->fs_info->subvol_sem);
@@ -1156,7 +1156,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1156 if (root->fs_info->sb->s_flags & MS_RDONLY) 1156 if (root->fs_info->sb->s_flags & MS_RDONLY)
1157 return -EROFS; 1157 return -EROFS;
1158 1158
1159 if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) 1159 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
1160 return -EINVAL; 1160 return -EINVAL;
1161 1161
1162 if (copy_from_user(&flags, arg, sizeof(flags))) 1162 if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1639,7 +1639,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1639 goto out_dput; 1639 goto out_dput;
1640 } 1640 }
1641 1641
1642 if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { 1642 if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
1643 err = -EINVAL; 1643 err = -EINVAL;
1644 goto out_dput; 1644 goto out_dput;
1645 } 1645 }
@@ -1925,7 +1925,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1925 } 1925 }
1926 1926
1927 /* clone data */ 1927 /* clone data */
1928 key.objectid = src->i_ino; 1928 key.objectid = btrfs_ino(src);
1929 key.type = BTRFS_EXTENT_DATA_KEY; 1929 key.type = BTRFS_EXTENT_DATA_KEY;
1930 key.offset = 0; 1930 key.offset = 0;
1931 1931
@@ -1952,7 +1952,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1952 1952
1953 btrfs_item_key_to_cpu(leaf, &key, slot); 1953 btrfs_item_key_to_cpu(leaf, &key, slot);
1954 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || 1954 if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
1955 key.objectid != src->i_ino) 1955 key.objectid != btrfs_ino(src))
1956 break; 1956 break;
1957 1957
1958 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { 1958 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
@@ -1995,7 +1995,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1995 goto next; 1995 goto next;
1996 1996
1997 memcpy(&new_key, &key, sizeof(new_key)); 1997 memcpy(&new_key, &key, sizeof(new_key));
1998 new_key.objectid = inode->i_ino; 1998 new_key.objectid = btrfs_ino(inode);
1999 if (off <= key.offset) 1999 if (off <= key.offset)
2000 new_key.offset = key.offset + destoff - off; 2000 new_key.offset = key.offset + destoff - off;
2001 else 2001 else
@@ -2049,7 +2049,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2049 ret = btrfs_inc_extent_ref(trans, root, 2049 ret = btrfs_inc_extent_ref(trans, root,
2050 disko, diskl, 0, 2050 disko, diskl, 0,
2051 root->root_key.objectid, 2051 root->root_key.objectid,
2052 inode->i_ino, 2052 btrfs_ino(inode),
2053 new_key.offset - datao); 2053 new_key.offset - datao);
2054 BUG_ON(ret); 2054 BUG_ON(ret);
2055 } 2055 }
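[editor's note] In the ioctl hunks, create_subvol() switches to the two-argument btrfs_find_free_objectid(root, &objectid) form and keys the new dir item and root ref on btrfs_ino(dir). The sketch below shows only the allocator calling convention (0 on success, negative errno otherwise); fake_root and fake_find_free_objectid() are hypothetical and do not model how btrfs actually picks objectids.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct fake_root {
	uint64_t highest_objectid;
	uint64_t max_objectid;
};

/* Fills *objectid and returns 0, or a negative errno when the space is full. */
static int fake_find_free_objectid(struct fake_root *root, uint64_t *objectid)
{
	if (root->highest_objectid >= root->max_objectid)
		return -ENOSPC;
	*objectid = ++root->highest_objectid;
	return 0;
}

int main(void)
{
	struct fake_root tree_root = { .highest_objectid = 256, .max_objectid = 260 };
	uint64_t objectid;
	int ret;

	ret = fake_find_free_objectid(&tree_root, &objectid);
	if (ret) {
		fprintf(stderr, "allocation failed: %d\n", ret);
		return 1;
	}
	printf("new subvolume objectid %llu\n", (unsigned long long)objectid);
	return 0;
}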
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 199a8013431..4b5b91cf48e 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -30,6 +30,7 @@
30#include "btrfs_inode.h" 30#include "btrfs_inode.h"
31#include "async-thread.h" 31#include "async-thread.h"
32#include "free-space-cache.h" 32#include "free-space-cache.h"
33#include "inode-map.h"
33 34
34/* 35/*
35 * backref_node, mapping_node and tree_block start with this 36 * backref_node, mapping_node and tree_block start with this
@@ -1409,9 +1410,9 @@ again:
1409 prev = node; 1410 prev = node;
1410 entry = rb_entry(node, struct btrfs_inode, rb_node); 1411 entry = rb_entry(node, struct btrfs_inode, rb_node);
1411 1412
1412 if (objectid < entry->vfs_inode.i_ino) 1413 if (objectid < btrfs_ino(&entry->vfs_inode))
1413 node = node->rb_left; 1414 node = node->rb_left;
1414 else if (objectid > entry->vfs_inode.i_ino) 1415 else if (objectid > btrfs_ino(&entry->vfs_inode))
1415 node = node->rb_right; 1416 node = node->rb_right;
1416 else 1417 else
1417 break; 1418 break;
@@ -1419,7 +1420,7 @@ again:
1419 if (!node) { 1420 if (!node) {
1420 while (prev) { 1421 while (prev) {
1421 entry = rb_entry(prev, struct btrfs_inode, rb_node); 1422 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1422 if (objectid <= entry->vfs_inode.i_ino) { 1423 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
1423 node = prev; 1424 node = prev;
1424 break; 1425 break;
1425 } 1426 }
@@ -1434,7 +1435,7 @@ again:
1434 return inode; 1435 return inode;
1435 } 1436 }
1436 1437
1437 objectid = entry->vfs_inode.i_ino + 1; 1438 objectid = btrfs_ino(&entry->vfs_inode) + 1;
1438 if (cond_resched_lock(&root->inode_lock)) 1439 if (cond_resched_lock(&root->inode_lock))
1439 goto again; 1440 goto again;
1440 1441
@@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1470 return -ENOMEM; 1471 return -ENOMEM;
1471 1472
1472 bytenr -= BTRFS_I(reloc_inode)->index_cnt; 1473 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1473 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, 1474 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
1474 bytenr, 0); 1475 bytenr, 0);
1475 if (ret < 0) 1476 if (ret < 0)
1476 goto out; 1477 goto out;
@@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
1558 if (first) { 1559 if (first) {
1559 inode = find_next_inode(root, key.objectid); 1560 inode = find_next_inode(root, key.objectid);
1560 first = 0; 1561 first = 0;
1561 } else if (inode && inode->i_ino < key.objectid) { 1562 } else if (inode && btrfs_ino(inode) < key.objectid) {
1562 btrfs_add_delayed_iput(inode); 1563 btrfs_add_delayed_iput(inode);
1563 inode = find_next_inode(root, key.objectid); 1564 inode = find_next_inode(root, key.objectid);
1564 } 1565 }
1565 if (inode && inode->i_ino == key.objectid) { 1566 if (inode && btrfs_ino(inode) == key.objectid) {
1566 end = key.offset + 1567 end = key.offset +
1567 btrfs_file_extent_num_bytes(leaf, fi); 1568 btrfs_file_extent_num_bytes(leaf, fi);
1568 WARN_ON(!IS_ALIGNED(key.offset, 1569 WARN_ON(!IS_ALIGNED(key.offset,
@@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1893 struct inode *inode = NULL; 1894 struct inode *inode = NULL;
1894 u64 objectid; 1895 u64 objectid;
1895 u64 start, end; 1896 u64 start, end;
1897 u64 ino;
1896 1898
1897 objectid = min_key->objectid; 1899 objectid = min_key->objectid;
1898 while (1) { 1900 while (1) {
@@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1905 inode = find_next_inode(root, objectid); 1907 inode = find_next_inode(root, objectid);
1906 if (!inode) 1908 if (!inode)
1907 break; 1909 break;
1910 ino = btrfs_ino(inode);
1908 1911
1909 if (inode->i_ino > max_key->objectid) { 1912 if (ino > max_key->objectid) {
1910 iput(inode); 1913 iput(inode);
1911 break; 1914 break;
1912 } 1915 }
1913 1916
1914 objectid = inode->i_ino + 1; 1917 objectid = ino + 1;
1915 if (!S_ISREG(inode->i_mode)) 1918 if (!S_ISREG(inode->i_mode))
1916 continue; 1919 continue;
1917 1920
1918 if (unlikely(min_key->objectid == inode->i_ino)) { 1921 if (unlikely(min_key->objectid == ino)) {
1919 if (min_key->type > BTRFS_EXTENT_DATA_KEY) 1922 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1920 continue; 1923 continue;
1921 if (min_key->type < BTRFS_EXTENT_DATA_KEY) 1924 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
@@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1928 start = 0; 1931 start = 0;
1929 } 1932 }
1930 1933
1931 if (unlikely(max_key->objectid == inode->i_ino)) { 1934 if (unlikely(max_key->objectid == ino)) {
1932 if (max_key->type < BTRFS_EXTENT_DATA_KEY) 1935 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1933 continue; 1936 continue;
1934 if (max_key->type > BTRFS_EXTENT_DATA_KEY) { 1937 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
@@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3897 if (IS_ERR(trans)) 3900 if (IS_ERR(trans))
3898 return ERR_CAST(trans); 3901 return ERR_CAST(trans);
3899 3902
3900 err = btrfs_find_free_objectid(trans, root, objectid, &objectid); 3903 err = btrfs_find_free_objectid(root, &objectid);
3901 if (err) 3904 if (err)
3902 goto out; 3905 goto out;
3903 3906
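[editor's note] The relocation hunks route every comparison in the per-root inode tree through btrfs_ino(&entry->vfs_inode) and cache the value as ino in invalidate_extent_cache(). The sketch below reproduces just the "first entry at or after a given objectid" walk from find_next_inode(), over a sorted array rather than an rbtree; entry and entry_ino() are hypothetical stand-ins.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t objectid; };

/* Stand-in for reading the inode number through an accessor. */
static uint64_t entry_ino(const struct entry *e)
{
	return e->objectid;
}

/* Return the first entry whose ino is >= objectid, or NULL if none. */
static const struct entry *find_next(const struct entry *tbl, size_t n,
				     uint64_t objectid)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (entry_ino(&tbl[mid]) < objectid)
			lo = mid + 1;	/* everything up to mid is too small */
		else
			hi = mid;
	}
	return lo < n ? &tbl[lo] : NULL;
}

int main(void)
{
	const struct entry tbl[] = { {256}, {300}, {4711} };
	const struct entry *e = find_next(tbl, 3, 301);

	if (e)
		printf("next inode objectid: %llu\n",
		       (unsigned long long)entry_ino(e));
	return 0;
}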
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c571734d5e5..4d1dbcbbaf4 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -27,6 +27,7 @@
27#include "transaction.h" 27#include "transaction.h"
28#include "locking.h" 28#include "locking.h"
29#include "tree-log.h" 29#include "tree-log.h"
30#include "inode-map.h"
30 31
31#define BTRFS_ROOT_TRANS_TAG 0 32#define BTRFS_ROOT_TRANS_TAG 0
32 33
@@ -760,8 +761,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
760 btrfs_update_reloc_root(trans, root); 761 btrfs_update_reloc_root(trans, root);
761 btrfs_orphan_commit_root(trans, root); 762 btrfs_orphan_commit_root(trans, root);
762 763
764 btrfs_save_ino_cache(root, trans);
765
763 if (root->commit_root != root->node) { 766 if (root->commit_root != root->node) {
767 mutex_lock(&root->fs_commit_mutex);
764 switch_commit_root(root); 768 switch_commit_root(root);
769 btrfs_unpin_free_ino(root);
770 mutex_unlock(&root->fs_commit_mutex);
771
765 btrfs_set_root_node(&root->root_item, 772 btrfs_set_root_node(&root->root_item,
766 root->node); 773 root->node);
767 } 774 }
@@ -930,7 +937,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
930 goto fail; 937 goto fail;
931 } 938 }
932 939
933 ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); 940 ret = btrfs_find_free_objectid(tree_root, &objectid);
934 if (ret) { 941 if (ret) {
935 pending->error = ret; 942 pending->error = ret;
936 goto fail; 943 goto fail;
@@ -967,7 +974,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
967 BUG_ON(ret); 974 BUG_ON(ret);
968 ret = btrfs_insert_dir_item(trans, parent_root, 975 ret = btrfs_insert_dir_item(trans, parent_root,
969 dentry->d_name.name, dentry->d_name.len, 976 dentry->d_name.name, dentry->d_name.len,
970 parent_inode->i_ino, &key, 977 btrfs_ino(parent_inode), &key,
971 BTRFS_FT_DIR, index); 978 BTRFS_FT_DIR, index);
972 BUG_ON(ret); 979 BUG_ON(ret);
973 980
@@ -1009,7 +1016,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1009 */ 1016 */
1010 ret = btrfs_add_root_ref(trans, tree_root, objectid, 1017 ret = btrfs_add_root_ref(trans, tree_root, objectid,
1011 parent_root->root_key.objectid, 1018 parent_root->root_key.objectid,
1012 parent_inode->i_ino, index, 1019 btrfs_ino(parent_inode), index,
1013 dentry->d_name.name, dentry->d_name.len); 1020 dentry->d_name.name, dentry->d_name.len);
1014 BUG_ON(ret); 1021 BUG_ON(ret);
1015 dput(parent); 1022 dput(parent);
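[editor's note] commit_fs_roots() now writes the per-root free-ino cache before the commit root is switched, and the switch plus btrfs_unpin_free_ino() happen together under fs_commit_mutex. A rough userspace sketch of that ordering follows, with hypothetical types and pthread locking standing in for the kernel mutex.

#include <pthread.h>
#include <stdio.h>

struct fake_root {
	pthread_mutex_t commit_mutex;
	int commit_root;	/* stands in for root->commit_root */
	int node;		/* stands in for root->node */
	int pinned_inos;	/* free inos held back until the switch */
};

static void save_ino_cache(struct fake_root *root)
{
	printf("writing free-ino cache (%d entries still pinned)\n",
	       root->pinned_inos);
}

static void commit_root(struct fake_root *root)
{
	save_ino_cache(root);			/* done before the switch */

	if (root->commit_root != root->node) {
		pthread_mutex_lock(&root->commit_mutex);
		root->commit_root = root->node;	/* switch_commit_root() */
		root->pinned_inos = 0;		/* btrfs_unpin_free_ino() */
		pthread_mutex_unlock(&root->commit_mutex);
	}
}

int main(void)
{
	struct fake_root root = {
		.commit_mutex = PTHREAD_MUTEX_INITIALIZER,
		.commit_root = 1, .node = 2, .pinned_inos = 5,
	};

	commit_root(&root);
	printf("commit_root=%d pinned=%d\n", root.commit_root, root.pinned_inos);
	return 0;
}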
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f997ec0c1ba..177d943755f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -519,7 +519,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
519 * file. This must be done before the btrfs_drop_extents run 519 * file. This must be done before the btrfs_drop_extents run
520 * so we don't try to drop this extent. 520 * so we don't try to drop this extent.
521 */ 521 */
522 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, 522 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
523 start, 0); 523 start, 0);
524 524
525 if (ret == 0 && 525 if (ret == 0 &&
@@ -832,7 +832,7 @@ again:
832 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); 832 read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
833 833
834 /* if we already have a perfect match, we're done */ 834 /* if we already have a perfect match, we're done */
835 if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, 835 if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
836 btrfs_inode_ref_index(eb, ref), 836 btrfs_inode_ref_index(eb, ref),
837 name, namelen)) { 837 name, namelen)) {
838 goto out; 838 goto out;
@@ -960,8 +960,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
960 unsigned long ptr; 960 unsigned long ptr;
961 unsigned long ptr_end; 961 unsigned long ptr_end;
962 int name_len; 962 int name_len;
963 u64 ino = btrfs_ino(inode);
963 964
964 key.objectid = inode->i_ino; 965 key.objectid = ino;
965 key.type = BTRFS_INODE_REF_KEY; 966 key.type = BTRFS_INODE_REF_KEY;
966 key.offset = (u64)-1; 967 key.offset = (u64)-1;
967 968
@@ -980,7 +981,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
980 } 981 }
981 btrfs_item_key_to_cpu(path->nodes[0], &key, 982 btrfs_item_key_to_cpu(path->nodes[0], &key,
982 path->slots[0]); 983 path->slots[0]);
983 if (key.objectid != inode->i_ino || 984 if (key.objectid != ino ||
984 key.type != BTRFS_INODE_REF_KEY) 985 key.type != BTRFS_INODE_REF_KEY)
985 break; 986 break;
986 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 987 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
@@ -1011,10 +1012,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1011 if (inode->i_nlink == 0) { 1012 if (inode->i_nlink == 0) {
1012 if (S_ISDIR(inode->i_mode)) { 1013 if (S_ISDIR(inode->i_mode)) {
1013 ret = replay_dir_deletes(trans, root, NULL, path, 1014 ret = replay_dir_deletes(trans, root, NULL, path,
1014 inode->i_ino, 1); 1015 ino, 1);
1015 BUG_ON(ret); 1016 BUG_ON(ret);
1016 } 1017 }
1017 ret = insert_orphan_item(trans, root, inode->i_ino); 1018 ret = insert_orphan_item(trans, root, ino);
1018 BUG_ON(ret); 1019 BUG_ON(ret);
1019 } 1020 }
1020 btrfs_free_path(path); 1021 btrfs_free_path(path);
@@ -2197,6 +2198,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2197 int ret; 2198 int ret;
2198 int err = 0; 2199 int err = 0;
2199 int bytes_del = 0; 2200 int bytes_del = 0;
2201 u64 dir_ino = btrfs_ino(dir);
2200 2202
2201 if (BTRFS_I(dir)->logged_trans < trans->transid) 2203 if (BTRFS_I(dir)->logged_trans < trans->transid)
2202 return 0; 2204 return 0;
@@ -2214,7 +2216,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2214 goto out_unlock; 2216 goto out_unlock;
2215 } 2217 }
2216 2218
2217 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, 2219 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2218 name, name_len, -1); 2220 name, name_len, -1);
2219 if (IS_ERR(di)) { 2221 if (IS_ERR(di)) {
2220 err = PTR_ERR(di); 2222 err = PTR_ERR(di);
@@ -2226,7 +2228,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2226 BUG_ON(ret); 2228 BUG_ON(ret);
2227 } 2229 }
2228 btrfs_release_path(log, path); 2230 btrfs_release_path(log, path);
2229 di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, 2231 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2230 index, name, name_len, -1); 2232 index, name, name_len, -1);
2231 if (IS_ERR(di)) { 2233 if (IS_ERR(di)) {
2232 err = PTR_ERR(di); 2234 err = PTR_ERR(di);
@@ -2244,7 +2246,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2244 if (bytes_del) { 2246 if (bytes_del) {
2245 struct btrfs_key key; 2247 struct btrfs_key key;
2246 2248
2247 key.objectid = dir->i_ino; 2249 key.objectid = dir_ino;
2248 key.offset = 0; 2250 key.offset = 0;
2249 key.type = BTRFS_INODE_ITEM_KEY; 2251 key.type = BTRFS_INODE_ITEM_KEY;
2250 btrfs_release_path(log, path); 2252 btrfs_release_path(log, path);
@@ -2303,7 +2305,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2303 log = root->log_root; 2305 log = root->log_root;
2304 mutex_lock(&BTRFS_I(inode)->log_mutex); 2306 mutex_lock(&BTRFS_I(inode)->log_mutex);
2305 2307
2306 ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, 2308 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2307 dirid, &index); 2309 dirid, &index);
2308 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2310 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2309 if (ret == -ENOSPC) { 2311 if (ret == -ENOSPC) {
@@ -2369,13 +2371,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2369 int nritems; 2371 int nritems;
2370 u64 first_offset = min_offset; 2372 u64 first_offset = min_offset;
2371 u64 last_offset = (u64)-1; 2373 u64 last_offset = (u64)-1;
2374 u64 ino = btrfs_ino(inode);
2372 2375
2373 log = root->log_root; 2376 log = root->log_root;
2374 max_key.objectid = inode->i_ino; 2377 max_key.objectid = ino;
2375 max_key.offset = (u64)-1; 2378 max_key.offset = (u64)-1;
2376 max_key.type = key_type; 2379 max_key.type = key_type;
2377 2380
2378 min_key.objectid = inode->i_ino; 2381 min_key.objectid = ino;
2379 min_key.type = key_type; 2382 min_key.type = key_type;
2380 min_key.offset = min_offset; 2383 min_key.offset = min_offset;
2381 2384
@@ -2388,9 +2391,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2388 * we didn't find anything from this transaction, see if there 2391 * we didn't find anything from this transaction, see if there
2389 * is anything at all 2392 * is anything at all
2390 */ 2393 */
2391 if (ret != 0 || min_key.objectid != inode->i_ino || 2394 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
2392 min_key.type != key_type) { 2395 min_key.objectid = ino;
2393 min_key.objectid = inode->i_ino;
2394 min_key.type = key_type; 2396 min_key.type = key_type;
2395 min_key.offset = (u64)-1; 2397 min_key.offset = (u64)-1;
2396 btrfs_release_path(root, path); 2398 btrfs_release_path(root, path);
@@ -2399,7 +2401,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2399 btrfs_release_path(root, path); 2401 btrfs_release_path(root, path);
2400 return ret; 2402 return ret;
2401 } 2403 }
2402 ret = btrfs_previous_item(root, path, inode->i_ino, key_type); 2404 ret = btrfs_previous_item(root, path, ino, key_type);
2403 2405
2404 /* if ret == 0 there are items for this type, 2406 /* if ret == 0 there are items for this type,
2405 * create a range to tell us the last key of this type. 2407 * create a range to tell us the last key of this type.
@@ -2417,7 +2419,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2417 } 2419 }
2418 2420
2419 /* go backward to find any previous key */ 2421 /* go backward to find any previous key */
2420 ret = btrfs_previous_item(root, path, inode->i_ino, key_type); 2422 ret = btrfs_previous_item(root, path, ino, key_type);
2421 if (ret == 0) { 2423 if (ret == 0) {
2422 struct btrfs_key tmp; 2424 struct btrfs_key tmp;
2423 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2425 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -2452,8 +2454,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2452 for (i = path->slots[0]; i < nritems; i++) { 2454 for (i = path->slots[0]; i < nritems; i++) {
2453 btrfs_item_key_to_cpu(src, &min_key, i); 2455 btrfs_item_key_to_cpu(src, &min_key, i);
2454 2456
2455 if (min_key.objectid != inode->i_ino || 2457 if (min_key.objectid != ino || min_key.type != key_type)
2456 min_key.type != key_type)
2457 goto done; 2458 goto done;
2458 ret = overwrite_item(trans, log, dst_path, src, i, 2459 ret = overwrite_item(trans, log, dst_path, src, i,
2459 &min_key); 2460 &min_key);
@@ -2474,7 +2475,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2474 goto done; 2475 goto done;
2475 } 2476 }
2476 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2477 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
2477 if (tmp.objectid != inode->i_ino || tmp.type != key_type) { 2478 if (tmp.objectid != ino || tmp.type != key_type) {
2478 last_offset = (u64)-1; 2479 last_offset = (u64)-1;
2479 goto done; 2480 goto done;
2480 } 2481 }
@@ -2500,8 +2501,7 @@ done:
2500 * is valid 2501 * is valid
2501 */ 2502 */
2502 ret = insert_dir_log_key(trans, log, path, key_type, 2503 ret = insert_dir_log_key(trans, log, path, key_type,
2503 inode->i_ino, first_offset, 2504 ino, first_offset, last_offset);
2504 last_offset);
2505 if (ret) 2505 if (ret)
2506 err = ret; 2506 err = ret;
2507 } 2507 }
@@ -2745,6 +2745,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2745 int nritems; 2745 int nritems;
2746 int ins_start_slot = 0; 2746 int ins_start_slot = 0;
2747 int ins_nr; 2747 int ins_nr;
2748 u64 ino = btrfs_ino(inode);
2748 2749
2749 log = root->log_root; 2750 log = root->log_root;
2750 2751
@@ -2757,11 +2758,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2757 return -ENOMEM; 2758 return -ENOMEM;
2758 } 2759 }
2759 2760
2760 min_key.objectid = inode->i_ino; 2761 min_key.objectid = ino;
2761 min_key.type = BTRFS_INODE_ITEM_KEY; 2762 min_key.type = BTRFS_INODE_ITEM_KEY;
2762 min_key.offset = 0; 2763 min_key.offset = 0;
2763 2764
2764 max_key.objectid = inode->i_ino; 2765 max_key.objectid = ino;
2765 2766
2766 /* today the code can only do partial logging of directories */ 2767 /* today the code can only do partial logging of directories */
2767 if (!S_ISDIR(inode->i_mode)) 2768 if (!S_ISDIR(inode->i_mode))
@@ -2784,8 +2785,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2784 2785
2785 if (inode_only == LOG_INODE_EXISTS) 2786 if (inode_only == LOG_INODE_EXISTS)
2786 max_key_type = BTRFS_XATTR_ITEM_KEY; 2787 max_key_type = BTRFS_XATTR_ITEM_KEY;
2787 ret = drop_objectid_items(trans, log, path, 2788 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
2788 inode->i_ino, max_key_type);
2789 } else { 2789 } else {
2790 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); 2790 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
2791 } 2791 }
@@ -2803,7 +2803,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2803 break; 2803 break;
2804again: 2804again:
2805 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 2805 /* note, ins_nr might be > 0 here, cleanup outside the loop */
2806 if (min_key.objectid != inode->i_ino) 2806 if (min_key.objectid != ino)
2807 break; 2807 break;
2808 if (min_key.type > max_key.type) 2808 if (min_key.type > max_key.type)
2809 break; 2809 break;
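[editor's note] The tree-log hunks hoist btrfs_ino(inode) into a local ino that seeds both min_key and max_key and terminates the scan once the walk leaves that inode's items. The sketch below imitates that bounded walk over a tiny in-memory key table; the key layout, the DIR_INDEX_KEY value and the items[] contents are made up for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

#define DIR_INDEX_KEY 96	/* arbitrary type value for the sketch */

static const struct key items[] = {
	{ 300, DIR_INDEX_KEY, 2 },
	{ 300, DIR_INDEX_KEY, 3 },
	{ 301, DIR_INDEX_KEY, 2 },	/* belongs to the next inode */
};

static void log_dir_range(uint64_t ino, uint64_t min_offset)
{
	struct key min_key = { ino, DIR_INDEX_KEY, min_offset };
	struct key max_key = { ino, DIR_INDEX_KEY, (uint64_t)-1 };
	size_t i;

	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		const struct key *k = &items[i];

		/* stop as soon as the walk leaves this inode's key space */
		if (k->objectid != min_key.objectid || k->type != min_key.type)
			break;
		if (k->offset < min_key.offset || k->offset > max_key.offset)
			continue;
		printf("log dir item at offset %llu\n",
		       (unsigned long long)k->offset);
	}
}

int main(void)
{
	log_dir_range(300, 2);
	return 0;
}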
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index cfd660550de..4e5a32173c4 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
44 return -ENOMEM; 44 return -ENOMEM;
45 45
46 /* lookup the xattr by name */ 46 /* lookup the xattr by name */
47 di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, 47 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
48 strlen(name), 0); 48 strlen(name), 0);
49 if (!di) { 49 if (!di) {
50 ret = -ENODATA; 50 ret = -ENODATA;
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
103 return -ENOMEM; 103 return -ENOMEM;
104 104
105 /* first lets see if we already have this xattr */ 105 /* first lets see if we already have this xattr */
106 di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, 106 di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
107 strlen(name), -1); 107 strlen(name), -1);
108 if (IS_ERR(di)) { 108 if (IS_ERR(di)) {
109 ret = PTR_ERR(di); 109 ret = PTR_ERR(di);
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
136 } 136 }
137 137
138 /* ok we have to create a completely new xattr */ 138 /* ok we have to create a completely new xattr */
139 ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, 139 ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
140 name, name_len, value, size); 140 name, name_len, value, size);
141 BUG_ON(ret); 141 BUG_ON(ret);
142out: 142out:
@@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
190 * NOTE: we set key.offset = 0; because we want to start with the 190 * NOTE: we set key.offset = 0; because we want to start with the
191 * first xattr that we find and walk forward 191 * first xattr that we find and walk forward
192 */ 192 */
193 key.objectid = inode->i_ino; 193 key.objectid = btrfs_ino(inode);
194 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); 194 btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
195 key.offset = 0; 195 key.offset = 0;
196 196
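[editor's note] The xattr hunks key every lookup, insert and listing on btrfs_ino(inode). As a final illustration, here is a toy getxattr-style lookup that resolves an attribute by the pair (inode number, name) and reports a missing entry as -ENODATA, matching the convention visible in __btrfs_getxattr() above; the record table and fake_getxattr() are invented for the sketch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xattr_rec { uint64_t ino; const char *name; const char *value; };

static const struct xattr_rec table[] = {
	{ 300, "user.comment", "hello" },
	{ 301, "user.comment", "other inode" },
};

static int fake_getxattr(uint64_t ino, const char *name, const char **value)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].ino == ino && !strcmp(table[i].name, name)) {
			*value = table[i].value;
			return 0;
		}
	}
	return -ENODATA;	/* missing attribute, as in the hunk above */
}

int main(void)
{
	const char *val;

	if (!fake_getxattr(300, "user.comment", &val))
		printf("user.comment = %s\n", val);
	if (fake_getxattr(300, "user.missing", &val) == -ENODATA)
		printf("missing attribute reported as -ENODATA\n");
	return 0;
}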