author		Chris Mason <chris.mason@oracle.com>	2011-05-21 09:27:38 -0400
committer	Chris Mason <chris.mason@oracle.com>	2011-05-21 09:27:38 -0400
commit		0965537308ac3b267ea16e731bd73870a51c53b8 (patch)
tree		7b52288b7272b2391f736dd82a313cbbaad68570 /fs/btrfs/free-space-cache.c
parent		61c4f2c81c61f73549928dfd9f3e8f26aa36a8cf (diff)
parent		82d5902d9c681be37ffa9d70482907f9f0b7ec1f (diff)

Merge branch 'ino-alloc' of git://repo.or.cz/linux-btrfs-devel into inode_numbers

Conflicts:
	fs/btrfs/free-space-cache.c

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	976
1 file changed, 596 insertions(+), 380 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 63731a1fb0a1..25a13ab750f8 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -25,18 +25,17 @@
 #include "transaction.h"
 #include "disk-io.h"
 #include "extent_io.h"
+#include "inode-map.h"
 
 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
 
-static void recalculate_thresholds(struct btrfs_block_group_cache
-				   *block_group);
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info);
 
-struct inode *lookup_free_space_inode(struct btrfs_root *root,
-				      struct btrfs_block_group_cache
-				      *block_group, struct btrfs_path *path)
+static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
+					       struct btrfs_path *path,
+					       u64 offset)
 {
 	struct btrfs_key key;
 	struct btrfs_key location;
@@ -46,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 	struct inode *inode = NULL;
 	int ret;
 
-	spin_lock(&block_group->lock);
-	if (block_group->inode)
-		inode = igrab(block_group->inode);
-	spin_unlock(&block_group->lock);
-	if (inode)
-		return inode;
-
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
 	key.type = 0;
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 
 	inode->i_mapping->flags &= ~__GFP_FS;
 
+	return inode;
+}
+
+struct inode *lookup_free_space_inode(struct btrfs_root *root,
+				      struct btrfs_block_group_cache
+				      *block_group, struct btrfs_path *path)
+{
+	struct inode *inode = NULL;
+
+	spin_lock(&block_group->lock);
+	if (block_group->inode)
+		inode = igrab(block_group->inode);
+	spin_unlock(&block_group->lock);
+	if (inode)
+		return inode;
+
+	inode = __lookup_free_space_inode(root, path,
+					  block_group->key.objectid);
+	if (IS_ERR(inode))
+		return inode;
+
 	spin_lock(&block_group->lock);
 	if (!root->fs_info->closing) {
 		block_group->inode = igrab(inode);
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 	return inode;
 }
 
-int create_free_space_inode(struct btrfs_root *root,
-			    struct btrfs_trans_handle *trans,
-			    struct btrfs_block_group_cache *block_group,
-			    struct btrfs_path *path)
+int __create_free_space_inode(struct btrfs_root *root,
+			      struct btrfs_trans_handle *trans,
+			      struct btrfs_path *path, u64 ino, u64 offset)
 {
 	struct btrfs_key key;
 	struct btrfs_disk_key disk_key;
 	struct btrfs_free_space_header *header;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
-	u64 objectid;
 	int ret;
 
-	ret = btrfs_find_free_objectid(trans, root, 0, &objectid);
-	if (ret < 0)
-		return ret;
-
-	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+	ret = btrfs_insert_empty_inode(trans, root, path, ino);
 	if (ret)
 		return ret;
 
@@ -131,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root,
 			      BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
 	btrfs_set_inode_nlink(leaf, inode_item, 1);
 	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
-	btrfs_set_inode_block_group(leaf, inode_item,
-				    block_group->key.objectid);
+	btrfs_set_inode_block_group(leaf, inode_item, offset);
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(root, path);
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
 	key.type = 0;
 
 	ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -157,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root,
 	return 0;
 }
 
+int create_free_space_inode(struct btrfs_root *root,
+			    struct btrfs_trans_handle *trans,
+			    struct btrfs_block_group_cache *block_group,
+			    struct btrfs_path *path)
+{
+	int ret;
+	u64 ino;
+
+	ret = btrfs_find_free_objectid(root, &ino);
+	if (ret < 0)
+		return ret;
+
+	return __create_free_space_inode(root, trans, path, ino,
+					 block_group->key.objectid);
+}
+
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct btrfs_trans_handle *trans,
 				    struct btrfs_path *path,
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 		return ret;
 	}
 
-	return btrfs_update_inode(trans, root, inode);
+	ret = btrfs_update_inode(trans, root, inode);
+	return ret;
 }
 
 static int readahead_cache(struct inode *inode)
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode)
 	return 0;
 }
 
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
-			  struct btrfs_block_group_cache *block_group)
+int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+			    struct btrfs_free_space_ctl *ctl,
+			    struct btrfs_path *path, u64 offset)
 {
-	struct btrfs_root *root = fs_info->tree_root;
-	struct inode *inode;
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
 	struct page *page;
-	struct btrfs_path *path;
 	u32 *checksums = NULL, *crc;
 	char *disk_crcs = NULL;
 	struct btrfs_key key;
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
-	u64 used = btrfs_block_group_used(&block_group->item);
 	u32 cur_crc = ~(u32)0;
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
 	int num_checksums;
-	int ret = 0;
-
-	/*
-	 * If we're unmounting then just return, since this does a search on the
-	 * normal root and not the commit root and we could deadlock.
-	 */
-	smp_mb();
-	if (fs_info->closing)
-		return 0;
-
-	/*
-	 * If this block group has been marked to be cleared for one reason or
-	 * another then we can't trust the on disk cache, so just return.
-	 */
-	spin_lock(&block_group->lock);
-	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	spin_unlock(&block_group->lock);
+	int ret = 0, ret2;
 
 	INIT_LIST_HEAD(&bitmaps);
 
-	path = btrfs_alloc_path();
-	if (!path)
-		return 0;
-
-	inode = lookup_free_space_inode(root, block_group, path);
-	if (IS_ERR(inode)) {
-		btrfs_free_path(path);
-		return 0;
-	}
-
 	/* Nothing in the space cache, goodbye */
-	if (!i_size_read(inode)) {
-		btrfs_free_path(path);
+	if (!i_size_read(inode))
 		goto out;
-	}
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
 	key.type = 0;
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	if (ret) {
-		btrfs_free_path(path);
+	if (ret < 0)
+		goto out;
+	else if (ret > 0) {
+		btrfs_release_path(root, path);
+		ret = 0;
 		goto out;
 	}
 
+	ret = -1;
+
 	leaf = path->nodes[0];
 	header = btrfs_item_ptr(leaf, path->slots[0],
 				struct btrfs_free_space_header);
 	num_entries = btrfs_free_space_entries(leaf, header);
 	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
 	generation = btrfs_free_space_generation(leaf, header);
-	btrfs_free_path(path);
+	btrfs_release_path(root, path);
 
 	if (BTRFS_I(inode)->generation != generation) {
 		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
-		       " not match free space cache generation (%llu) for "
-		       "block group %llu\n",
+		       " not match free space cache generation (%llu)\n",
 		       (unsigned long long)BTRFS_I(inode)->generation,
-		       (unsigned long long)generation,
-		       (unsigned long long)block_group->key.objectid);
-		goto free_cache;
+		       (unsigned long long)generation);
+		goto out;
 	}
 
 	if (!num_entries)
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		goto out;
 
 	ret = readahead_cache(inode);
-	if (ret) {
-		ret = 0;
+	if (ret)
 		goto out;
-	}
 
 	while (1) {
 		struct btrfs_free_space_entry *entry;
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		}
 
 		page = grab_cache_page(inode->i_mapping, index);
-		if (!page) {
-			ret = 0;
+		if (!page)
 			goto free_cache;
-		}
 
 		if (!PageUptodate(page)) {
 			btrfs_readpage(NULL, page);
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			unlock_page(page);
 			page_cache_release(page);
 			printk(KERN_ERR "btrfs: error reading free "
-			       "space cache: %llu\n",
-			       (unsigned long long)
-			       block_group->key.objectid);
+			       "space cache\n");
 			goto free_cache;
 		}
 	}
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		gen = addr + (sizeof(u32) * num_checksums);
 		if (*gen != BTRFS_I(inode)->generation) {
 			printk(KERN_ERR "btrfs: space cache generation"
-			       " (%llu) does not match inode (%llu) "
-			       "for block group %llu\n",
+			       " (%llu) does not match inode (%llu)\n",
 			       (unsigned long long)*gen,
 			       (unsigned long long)
-			       BTRFS_I(inode)->generation,
-			       (unsigned long long)
-			       block_group->key.objectid);
+			       BTRFS_I(inode)->generation);
 			kunmap(page);
 			unlock_page(page);
 			page_cache_release(page);
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 				  PAGE_CACHE_SIZE - start_offset);
 		btrfs_csum_final(cur_crc, (char *)&cur_crc);
 		if (cur_crc != *crc) {
-			printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
-			       "block group %llu\n", index,
-			       (unsigned long long)block_group->key.objectid);
+			printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
+			       index);
 			kunmap(page);
 			unlock_page(page);
 			page_cache_release(page);
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		}
 
 		if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
-			spin_lock(&block_group->tree_lock);
-			ret = link_free_space(block_group, e);
-			spin_unlock(&block_group->tree_lock);
+			spin_lock(&ctl->tree_lock);
+			ret = link_free_space(ctl, e);
+			spin_unlock(&ctl->tree_lock);
 			BUG_ON(ret);
 		} else {
 			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 				page_cache_release(page);
 				goto free_cache;
 			}
-			spin_lock(&block_group->tree_lock);
-			ret = link_free_space(block_group, e);
-			block_group->total_bitmaps++;
-			recalculate_thresholds(block_group);
-			spin_unlock(&block_group->tree_lock);
+			spin_lock(&ctl->tree_lock);
+			ret2 = link_free_space(ctl, e);
+			ctl->total_bitmaps++;
+			ctl->op->recalc_thresholds(ctl);
+			spin_unlock(&ctl->tree_lock);
 			list_add_tail(&e->list, &bitmaps);
 		}
 
@@ -471,41 +453,97 @@ next:
 		index++;
 	}
 
-	spin_lock(&block_group->tree_lock);
-	if (block_group->free_space != (block_group->key.offset - used -
-					block_group->bytes_super)) {
-		spin_unlock(&block_group->tree_lock);
-		printk(KERN_ERR "block group %llu has an wrong amount of free "
-		       "space\n", block_group->key.objectid);
-		ret = 0;
-		goto free_cache;
-	}
-	spin_unlock(&block_group->tree_lock);
-
 	ret = 1;
 out:
 	kfree(checksums);
 	kfree(disk_crcs);
-	iput(inode);
 	return ret;
-
 free_cache:
-	/* This cache is bogus, make sure it gets cleared */
+	__btrfs_remove_free_space_cache(ctl);
+	goto out;
+}
+
+int load_free_space_cache(struct btrfs_fs_info *fs_info,
+			  struct btrfs_block_group_cache *block_group)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct btrfs_root *root = fs_info->tree_root;
+	struct inode *inode;
+	struct btrfs_path *path;
+	int ret;
+	bool matched;
+	u64 used = btrfs_block_group_used(&block_group->item);
+
+	/*
+	 * If we're unmounting then just return, since this does a search on the
+	 * normal root and not the commit root and we could deadlock.
+	 */
+	smp_mb();
+	if (fs_info->closing)
+		return 0;
+
+	/*
+	 * If this block group has been marked to be cleared for one reason or
+	 * another then we can't trust the on disk cache, so just return.
+	 */
 	spin_lock(&block_group->lock);
-	block_group->disk_cache_state = BTRFS_DC_CLEAR;
+	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
 	spin_unlock(&block_group->lock);
-	btrfs_remove_free_space_cache(block_group);
-	goto out;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return 0;
+
+	inode = lookup_free_space_inode(root, block_group, path);
+	if (IS_ERR(inode)) {
+		btrfs_free_path(path);
+		return 0;
+	}
+
+	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+				      path, block_group->key.objectid);
+	btrfs_free_path(path);
+	if (ret <= 0)
+		goto out;
+
+	spin_lock(&ctl->tree_lock);
+	matched = (ctl->free_space == (block_group->key.offset - used -
+				       block_group->bytes_super));
+	spin_unlock(&ctl->tree_lock);
+
+	if (!matched) {
+		__btrfs_remove_free_space_cache(ctl);
+		printk(KERN_ERR "block group %llu has an wrong amount of free "
+		       "space\n", block_group->key.objectid);
+		ret = -1;
+	}
+out:
+	if (ret < 0) {
+		/* This cache is bogus, make sure it gets cleared */
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_CLEAR;
+		spin_unlock(&block_group->lock);
+		ret = 0;
+
+		printk(KERN_ERR "btrfs: failed to load free space cache "
+		       "for block group %llu\n", block_group->key.objectid);
+	}
+
+	iput(inode);
+	return ret;
 }
 
-int btrfs_write_out_cache(struct btrfs_root *root,
-			  struct btrfs_trans_handle *trans,
-			  struct btrfs_block_group_cache *block_group,
-			  struct btrfs_path *path)
+int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+			    struct btrfs_free_space_ctl *ctl,
+			    struct btrfs_block_group_cache *block_group,
+			    struct btrfs_trans_handle *trans,
+			    struct btrfs_path *path, u64 offset)
 {
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
-	struct inode *inode;
 	struct rb_node *node;
 	struct list_head *pos, *n;
 	struct page **pages;
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	int index = 0, num_pages = 0;
 	int entries = 0;
 	int bitmaps = 0;
-	int ret = 0;
+	int ret = -1;
 	bool next_page = false;
 	bool out_of_space = false;
 
-	root = root->fs_info->tree_root;
-
 	INIT_LIST_HEAD(&bitmap_list);
 
-	spin_lock(&block_group->lock);
-	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
-		spin_unlock(&block_group->lock);
-		return 0;
-	}
-	spin_unlock(&block_group->lock);
-
-	inode = lookup_free_space_inode(root, block_group, path);
-	if (IS_ERR(inode))
+	node = rb_first(&ctl->free_space_offset);
+	if (!node)
 		return 0;
 
-	if (!i_size_read(inode)) {
-		iput(inode);
-		return 0;
-	}
-
-	node = rb_first(&block_group->free_space_offset);
-	if (!node) {
-		iput(inode);
-		return 0;
-	}
+	if (!i_size_read(inode))
+		return -1;
 
 	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
 		PAGE_CACHE_SHIFT;
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 
 	/* We need a checksum per page. */
 	crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
-	if (!crc) {
-		iput(inode);
-		return 0;
-	}
+	if (!crc)
+		return -1;
 
 	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
 	if (!pages) {
 		kfree(crc);
-		iput(inode);
-		return 0;
+		return -1;
 	}
 
 	/* Since the first page has all of our checksums and our generation we
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
 
 	/* Get the cluster for this block_group if it exists */
-	if (!list_empty(&block_group->cluster_list))
+	if (block_group && !list_empty(&block_group->cluster_list))
 		cluster = list_entry(block_group->cluster_list.next,
 				     struct btrfs_free_cluster,
 				     block_group_list);
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 * When searching for pinned extents, we need to start at our start
 	 * offset.
 	 */
-	start = block_group->key.objectid;
+	if (block_group)
+		start = block_group->key.objectid;
 
 	/* Write out the extent entries */
 	do {
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 * We want to add any pinned extents to our free space cache
 	 * so we don't leak the space
 	 */
-	while (!next_page && (start < block_group->key.objectid +
-			      block_group->key.offset)) {
+	while (block_group && !next_page &&
+	       (start < block_group->key.objectid +
+		block_group->key.offset)) {
 		ret = find_first_extent_bit(unpin, start, &start, &end,
 					    EXTENT_DIRTY);
 		if (ret) {
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	filemap_write_and_wait(inode->i_mapping);
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-	key.offset = block_group->key.objectid;
+	key.offset = offset;
 	key.type = 0;
 
 	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
 	if (ret < 0) {
-		ret = 0;
+		ret = -1;
 		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
 				 EXTENT_DIRTY | EXTENT_DELALLOC |
 				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -816,8 +836,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		path->slots[0]--;
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
-		    found_key.offset != block_group->key.objectid) {
-			ret = 0;
+		    found_key.offset != offset) {
+			ret = -1;
 			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
 					 EXTENT_DIRTY | EXTENT_DELALLOC |
 					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
@@ -837,44 +857,78 @@
 	ret = 1;
 
 out_free:
-	if (ret == 0) {
+	if (ret != 1) {
 		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
-		spin_lock(&block_group->lock);
-		block_group->disk_cache_state = BTRFS_DC_ERROR;
-		spin_unlock(&block_group->lock);
 		BTRFS_I(inode)->generation = 0;
 	}
 	kfree(checksums);
 	kfree(pages);
 	btrfs_update_inode(trans, root, inode);
+	return ret;
+}
+
+int btrfs_write_out_cache(struct btrfs_root *root,
+			  struct btrfs_trans_handle *trans,
+			  struct btrfs_block_group_cache *block_group,
+			  struct btrfs_path *path)
+{
+	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct inode *inode;
+	int ret = 0;
+
+	root = root->fs_info->tree_root;
+
+	spin_lock(&block_group->lock);
+	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
+	spin_unlock(&block_group->lock);
+
+	inode = lookup_free_space_inode(root, block_group, path);
+	if (IS_ERR(inode))
+		return 0;
+
+	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+				      path, block_group->key.objectid);
+	if (ret < 0) {
+		spin_lock(&block_group->lock);
+		block_group->disk_cache_state = BTRFS_DC_ERROR;
+		spin_unlock(&block_group->lock);
+		ret = 0;
+
+		printk(KERN_ERR "btrfs: failed to write free space cace "
+		       "for block group %llu\n", block_group->key.objectid);
+	}
+
 	iput(inode);
 	return ret;
 }
 
-static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
 					  u64 offset)
 {
 	BUG_ON(offset < bitmap_start);
 	offset -= bitmap_start;
-	return (unsigned long)(div64_u64(offset, sectorsize));
+	return (unsigned long)(div_u64(offset, unit));
 }
 
-static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
 {
-	return (unsigned long)(div64_u64(bytes, sectorsize));
+	return (unsigned long)(div_u64(bytes, unit));
 }
 
-static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
 				   u64 offset)
 {
 	u64 bitmap_start;
 	u64 bytes_per_bitmap;
 
-	bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
-	bitmap_start = offset - block_group->key.objectid;
+	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+	bitmap_start = offset - ctl->start;
 	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
 	bitmap_start *= bytes_per_bitmap;
-	bitmap_start += block_group->key.objectid;
+	bitmap_start += ctl->start;
 
 	return bitmap_start;
 }
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
  * offset.
  */
 static struct btrfs_free_space *
-tree_search_offset(struct btrfs_block_group_cache *block_group,
+tree_search_offset(struct btrfs_free_space_ctl *ctl,
 		   u64 offset, int bitmap_only, int fuzzy)
 {
-	struct rb_node *n = block_group->free_space_offset.rb_node;
+	struct rb_node *n = ctl->free_space_offset.rb_node;
 	struct btrfs_free_space *entry, *prev = NULL;
 
 	/* find entry that is closest to the 'offset' */
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 				break;
 			}
 		}
-		if (entry->offset + BITS_PER_BITMAP *
-		    block_group->sectorsize > offset)
+		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
 			return entry;
 	} else if (entry->offset + entry->bytes > offset)
 		return entry;
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	while (1) {
 		if (entry->bitmap) {
 			if (entry->offset + BITS_PER_BITMAP *
-			    block_group->sectorsize > offset)
+			    ctl->unit > offset)
 				break;
 		} else {
 			if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 }
 
 static inline void
-__unlink_free_space(struct btrfs_block_group_cache *block_group,
+__unlink_free_space(struct btrfs_free_space_ctl *ctl,
 		    struct btrfs_free_space *info)
 {
-	rb_erase(&info->offset_index, &block_group->free_space_offset);
-	block_group->free_extents--;
+	rb_erase(&info->offset_index, &ctl->free_space_offset);
+	ctl->free_extents--;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info)
 {
-	__unlink_free_space(block_group, info);
-	block_group->free_space -= info->bytes;
+	__unlink_free_space(ctl, info);
+	ctl->free_space -= info->bytes;
 }
 
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info)
 {
 	int ret = 0;
 
 	BUG_ON(!info->bitmap && !info->bytes);
-	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
 				 &info->offset_index, (info->bitmap != NULL));
 	if (ret)
 		return ret;
 
-	block_group->free_space += info->bytes;
-	block_group->free_extents++;
+	ctl->free_space += info->bytes;
+	ctl->free_extents++;
 	return ret;
 }
 
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 {
+	struct btrfs_block_group_cache *block_group = ctl->private;
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
 	u64 size = block_group->key.offset;
+	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+
+	BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
 	 * we add more bitmaps.
 	 */
-	bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
 
 	if (bitmap_bytes >= max_bytes) {
-		block_group->extents_thresh = 0;
+		ctl->extents_thresh = 0;
 		return;
 	}
 
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	extent_bytes = max_bytes - bitmap_bytes;
 	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
 
-	block_group->extents_thresh =
+	ctl->extents_thresh =
 		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
 }
 
-static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info, u64 offset,
 			      u64 bytes)
 {
-	unsigned long start, end;
-	unsigned long i;
+	unsigned long start, count;
 
-	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-	end = start + bytes_to_bits(bytes, block_group->sectorsize);
-	BUG_ON(end > BITS_PER_BITMAP);
+	start = offset_to_bit(info->offset, ctl->unit, offset);
+	count = bytes_to_bits(bytes, ctl->unit);
+	BUG_ON(start + count > BITS_PER_BITMAP);
 
-	for (i = start; i < end; i++)
-		clear_bit(i, info->bitmap);
+	bitmap_clear(info->bitmap, start, count);
 
 	info->bytes -= bytes;
-	block_group->free_space -= bytes;
+	ctl->free_space -= bytes;
 }
 
-static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
 			    struct btrfs_free_space *info, u64 offset,
 			    u64 bytes)
 {
-	unsigned long start, end;
-	unsigned long i;
+	unsigned long start, count;
 
-	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-	end = start + bytes_to_bits(bytes, block_group->sectorsize);
-	BUG_ON(end > BITS_PER_BITMAP);
+	start = offset_to_bit(info->offset, ctl->unit, offset);
+	count = bytes_to_bits(bytes, ctl->unit);
+	BUG_ON(start + count > BITS_PER_BITMAP);
 
-	for (i = start; i < end; i++)
-		set_bit(i, info->bitmap);
+	bitmap_set(info->bitmap, start, count);
 
 	info->bytes += bytes;
-	block_group->free_space += bytes;
+	ctl->free_space += bytes;
 }
 
-static int search_bitmap(struct btrfs_block_group_cache *block_group,
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
 			 struct btrfs_free_space *bitmap_info, u64 *offset,
 			 u64 *bytes)
 {
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
 	unsigned long bits, i;
 	unsigned long next_zero;
 
-	i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+	i = offset_to_bit(bitmap_info->offset, ctl->unit,
 			  max_t(u64, *offset, bitmap_info->offset));
-	bits = bytes_to_bits(*bytes, block_group->sectorsize);
+	bits = bytes_to_bits(*bytes, ctl->unit);
 
 	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
 	     i < BITS_PER_BITMAP;
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
 	}
 
 	if (found_bits) {
-		*offset = (u64)(i * block_group->sectorsize) +
-			bitmap_info->offset;
-		*bytes = (u64)(found_bits) * block_group->sectorsize;
+		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
+		*bytes = (u64)(found_bits) * ctl->unit;
 		return 0;
 	}
 
 	return -1;
 }
 
-static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
-						*block_group, u64 *offset,
-						u64 *bytes, int debug)
+static struct btrfs_free_space *
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
 	int ret;
 
-	if (!block_group->free_space_offset.rb_node)
+	if (!ctl->free_space_offset.rb_node)
 		return NULL;
 
-	entry = tree_search_offset(block_group,
-				   offset_to_bitmap(block_group, *offset),
-				   0, 1);
+	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
 	if (!entry)
 		return NULL;
 
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
 			continue;
 
 		if (entry->bitmap) {
-			ret = search_bitmap(block_group, entry, offset, bytes);
+			ret = search_bitmap(ctl, entry, offset, bytes);
 			if (!ret)
 				return entry;
 			continue;
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
 	return NULL;
 }
 
-static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info, u64 offset)
 {
-	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
-	int max_bitmaps = (int)div64_u64(block_group->key.offset +
-					 bytes_per_bg - 1, bytes_per_bg);
-	BUG_ON(block_group->total_bitmaps >= max_bitmaps);
-
-	info->offset = offset_to_bitmap(block_group, offset);
+	info->offset = offset_to_bitmap(ctl, offset);
 	info->bytes = 0;
-	link_free_space(block_group, info);
-	block_group->total_bitmaps++;
+	link_free_space(ctl, info);
+	ctl->total_bitmaps++;
 
-	recalculate_thresholds(block_group);
+	ctl->op->recalc_thresholds(ctl);
 }
 
-static void free_bitmap(struct btrfs_block_group_cache *block_group,
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
 			struct btrfs_free_space *bitmap_info)
 {
-	unlink_free_space(block_group, bitmap_info);
+	unlink_free_space(ctl, bitmap_info);
 	kfree(bitmap_info->bitmap);
 	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
-	block_group->total_bitmaps--;
-	recalculate_thresholds(block_group);
+	ctl->total_bitmaps--;
+	ctl->op->recalc_thresholds(ctl);
 }
 
-static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
 				       struct btrfs_free_space *bitmap_info,
 				       u64 *offset, u64 *bytes)
 {
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
 	int ret;
 
 again:
-	end = bitmap_info->offset +
-		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
 	/*
 	 * XXX - this can go away after a few releases.
@@ -1288,24 +1332,22 @@ again:
 	search_start = *offset;
 	search_bytes = *bytes;
 	search_bytes = min(search_bytes, end - search_start + 1);
-	ret = search_bitmap(block_group, bitmap_info, &search_start,
-			    &search_bytes);
+	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
 	BUG_ON(ret < 0 || search_start != *offset);
 
 	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-		bitmap_clear_bits(block_group, bitmap_info, *offset,
-				  end - *offset + 1);
+		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
 		*bytes -= end - *offset + 1;
 		*offset = end + 1;
 	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-		bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
 		*bytes = 0;
 	}
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
 		if (!bitmap_info->bytes)
-			free_bitmap(block_group, bitmap_info);
+			free_bitmap(ctl, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1332,31 +1374,28 @@ again:
 		 */
 		search_start = *offset;
 		search_bytes = *bytes;
-		ret = search_bitmap(block_group, bitmap_info, &search_start,
+		ret = search_bitmap(ctl, bitmap_info, &search_start,
 				    &search_bytes);
 		if (ret < 0 || search_start != *offset)
 			return -EAGAIN;
 
 		goto again;
 	} else if (!bitmap_info->bytes)
-		free_bitmap(block_group, bitmap_info);
+		free_bitmap(ctl, bitmap_info);
 
 	return 0;
 }
 
-static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+		       struct btrfs_free_space *info)
 {
-	struct btrfs_free_space *bitmap_info;
-	int added = 0;
-	u64 bytes, offset, end;
-	int ret;
+	struct btrfs_block_group_cache *block_group = ctl->private;
 
 	/*
 	 * If we are below the extents threshold then we can add this as an
 	 * extent, and don't have to deal with the bitmap
 	 */
-	if (block_group->free_extents < block_group->extents_thresh) {
+	if (ctl->free_extents < ctl->extents_thresh) {
 		/*
 		 * If this block group has some small extents we don't want to
 		 * use up all of our free slots in the cache with them, we want
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
 		 * the overhead of a bitmap if we don't have to.
 		 */
 		if (info->bytes <= block_group->sectorsize * 4) {
-			if (block_group->free_extents * 2 <=
-			    block_group->extents_thresh)
-				return 0;
+			if (ctl->free_extents * 2 <= ctl->extents_thresh)
+				return false;
 		} else {
-			return 0;
+			return false;
 		}
 	}
 
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
 	 */
 	if (BITS_PER_BITMAP * block_group->sectorsize >
 	    block_group->key.offset)
-		return 0;
+		return false;
+
+	return true;
+}
+
+static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info)
+{
+	struct btrfs_free_space *bitmap_info;
+	int added = 0;
+	u64 bytes, offset, end;
+	int ret;
 
 	bytes = info->bytes;
 	offset = info->offset;
 
+	if (!ctl->op->use_bitmap(ctl, info))
+		return 0;
+
 again:
-	bitmap_info = tree_search_offset(block_group,
-					 offset_to_bitmap(block_group, offset),
+	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					 1, 0);
 	if (!bitmap_info) {
 		BUG_ON(added);
 		goto new_bitmap;
 	}
 
-	end = bitmap_info->offset +
-		(u64)(BITS_PER_BITMAP * block_group->sectorsize);
+	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
 
 	if (offset >= bitmap_info->offset && offset + bytes > end) {
-		bitmap_set_bits(block_group, bitmap_info, offset,
-				end - offset);
+		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
 		bytes -= end - offset;
 		offset = end;
 		added = 0;
 	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-		bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
 		bytes = 0;
 	} else {
 		BUG();
@@ -1417,19 +1466,19 @@ again:
 
 new_bitmap:
 	if (info && info->bitmap) {
-		add_new_bitmap(block_group, info, offset);
+		add_new_bitmap(ctl, info, offset);
 		added = 1;
 		info = NULL;
 		goto again;
 	} else {
-		spin_unlock(&block_group->tree_lock);
+		spin_unlock(&ctl->tree_lock);
 
 		/* no pre-allocated info, allocate a new one */
 		if (!info) {
 			info = kmem_cache_zalloc(btrfs_free_space_cachep,
 						 GFP_NOFS);
 			if (!info) {
-				spin_lock(&block_group->tree_lock);
+				spin_lock(&ctl->tree_lock);
 				ret = -ENOMEM;
 				goto out;
 			}
@@ -1437,7 +1486,7 @@ new_bitmap:
 
 		/* allocate the bitmap */
 		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-		spin_lock(&block_group->tree_lock);
+		spin_lock(&ctl->tree_lock);
 		if (!info->bitmap) {
 			ret = -ENOMEM;
 			goto out;
@@ -1455,7 +1504,7 @@ out:
 	return ret;
 }
 
-bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
 			  struct btrfs_free_space *info, bool update_stat)
 {
 	struct btrfs_free_space *left_info;
@@ -1469,18 +1518,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
-	right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
 	if (right_info && rb_prev(&right_info->offset_index))
 		left_info = rb_entry(rb_prev(&right_info->offset_index),
 				     struct btrfs_free_space, offset_index);
 	else
-		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+		left_info = tree_search_offset(ctl, offset - 1, 0, 0);
 
 	if (right_info && !right_info->bitmap) {
 		if (update_stat)
-			unlink_free_space(block_group, right_info);
+			unlink_free_space(ctl, right_info);
 		else
-			__unlink_free_space(block_group, right_info);
+			__unlink_free_space(ctl, right_info);
 		info->bytes += right_info->bytes;
 		kmem_cache_free(btrfs_free_space_cachep, right_info);
 		merged = true;
@@ -1489,9 +1538,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1489 | if (left_info && !left_info->bitmap && | 1538 | if (left_info && !left_info->bitmap && |
1490 | left_info->offset + left_info->bytes == offset) { | 1539 | left_info->offset + left_info->bytes == offset) { |
1491 | if (update_stat) | 1540 | if (update_stat) |
1492 | unlink_free_space(block_group, left_info); | 1541 | unlink_free_space(ctl, left_info); |
1493 | else | 1542 | else |
1494 | __unlink_free_space(block_group, left_info); | 1543 | __unlink_free_space(ctl, left_info); |
1495 | info->offset = left_info->offset; | 1544 | info->offset = left_info->offset; |
1496 | info->bytes += left_info->bytes; | 1545 | info->bytes += left_info->bytes; |
1497 | kmem_cache_free(btrfs_free_space_cachep, left_info); | 1546 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
@@ -1501,8 +1550,8 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1501 | return merged; | 1550 | return merged; |
1502 | } | 1551 | } |
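
try_merge_free_space() now operates on the ctl, but the merge rule itself is untouched: a non-bitmap right neighbour starting exactly at offset + bytes, or a non-bitmap left neighbour ending exactly at offset, is unlinked and folded into the entry being added. A toy illustration of the rule (plain structs stand in for the rbtree; none of the names below are kernel names):

/* Illustrative only: absorb exactly-adjacent neighbours into 'info'. */
#include <stdint.h>
#include <stdio.h>

struct span { uint64_t offset, bytes; };

static void try_merge(struct span *info, const struct span *left,
                      const struct span *right)
{
        if (right && info->offset + info->bytes == right->offset)
                info->bytes += right->bytes;          /* absorb right */
        if (left && left->offset + left->bytes == info->offset) {
                info->offset = left->offset;          /* absorb left  */
                info->bytes += left->bytes;
        }
}

int main(void)
{
        struct span left = { 0, 4096 }, right = { 8192, 4096 };
        struct span info = { 4096, 4096 };

        try_merge(&info, &left, &right);
        printf("merged extent: offset=%llu bytes=%llu\n",
               (unsigned long long)info.offset,
               (unsigned long long)info.bytes);
        return 0;
}
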
1503 | 1552 | ||
1504 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 1553 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, |
1505 | u64 offset, u64 bytes) | 1554 | u64 offset, u64 bytes) |
1506 | { | 1555 | { |
1507 | struct btrfs_free_space *info; | 1556 | struct btrfs_free_space *info; |
1508 | int ret = 0; | 1557 | int ret = 0; |
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1514 | info->offset = offset; | 1563 | info->offset = offset; |
1515 | info->bytes = bytes; | 1564 | info->bytes = bytes; |
1516 | 1565 | ||
1517 | spin_lock(&block_group->tree_lock); | 1566 | spin_lock(&ctl->tree_lock); |
1518 | 1567 | ||
1519 | if (try_merge_free_space(block_group, info, true)) | 1568 | if (try_merge_free_space(ctl, info, true)) |
1520 | goto link; | 1569 | goto link; |
1521 | 1570 | ||
1522 | /* | 1571 | /* |
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1524 | * extent then we know we're going to have to allocate a new extent, so | 1573 | * extent then we know we're going to have to allocate a new extent, so |
1525 | * before we do that see if we need to drop this into a bitmap | 1574 | * before we do that see if we need to drop this into a bitmap |
1526 | */ | 1575 | */ |
1527 | ret = insert_into_bitmap(block_group, info); | 1576 | ret = insert_into_bitmap(ctl, info); |
1528 | if (ret < 0) { | 1577 | if (ret < 0) { |
1529 | goto out; | 1578 | goto out; |
1530 | } else if (ret) { | 1579 | } else if (ret) { |
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1532 | goto out; | 1581 | goto out; |
1533 | } | 1582 | } |
1534 | link: | 1583 | link: |
1535 | ret = link_free_space(block_group, info); | 1584 | ret = link_free_space(ctl, info); |
1536 | if (ret) | 1585 | if (ret) |
1537 | kmem_cache_free(btrfs_free_space_cachep, info); | 1586 | kmem_cache_free(btrfs_free_space_cachep, info); |
1538 | out: | 1587 | out: |
1539 | spin_unlock(&block_group->tree_lock); | 1588 | spin_unlock(&ctl->tree_lock); |
1540 | 1589 | ||
1541 | if (ret) { | 1590 | if (ret) { |
1542 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); | 1591 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); |
@@ -1549,21 +1598,21 @@ out: | |||
1549 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 1598 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
1550 | u64 offset, u64 bytes) | 1599 | u64 offset, u64 bytes) |
1551 | { | 1600 | { |
1601 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1552 | struct btrfs_free_space *info; | 1602 | struct btrfs_free_space *info; |
1553 | struct btrfs_free_space *next_info = NULL; | 1603 | struct btrfs_free_space *next_info = NULL; |
1554 | int ret = 0; | 1604 | int ret = 0; |
1555 | 1605 | ||
1556 | spin_lock(&block_group->tree_lock); | 1606 | spin_lock(&ctl->tree_lock); |
1557 | 1607 | ||
1558 | again: | 1608 | again: |
1559 | info = tree_search_offset(block_group, offset, 0, 0); | 1609 | info = tree_search_offset(ctl, offset, 0, 0); |
1560 | if (!info) { | 1610 | if (!info) { |
1561 | /* | 1611 | /* |
1562 | * oops didn't find an extent that matched the space we wanted | 1612 | * oops didn't find an extent that matched the space we wanted |
1563 | * to remove, look for a bitmap instead | 1613 | * to remove, look for a bitmap instead |
1564 | */ | 1614 | */ |
1565 | info = tree_search_offset(block_group, | 1615 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
1566 | offset_to_bitmap(block_group, offset), | ||
1567 | 1, 0); | 1616 | 1, 0); |
1568 | if (!info) { | 1617 | if (!info) { |
1569 | WARN_ON(1); | 1618 | WARN_ON(1); |
@@ -1578,8 +1627,8 @@ again: | |||
1578 | offset_index); | 1627 | offset_index); |
1579 | 1628 | ||
1580 | if (next_info->bitmap) | 1629 | if (next_info->bitmap) |
1581 | end = next_info->offset + BITS_PER_BITMAP * | 1630 | end = next_info->offset + |
1582 | block_group->sectorsize - 1; | 1631 | BITS_PER_BITMAP * ctl->unit - 1; |
1583 | else | 1632 | else |
1584 | end = next_info->offset + next_info->bytes; | 1633 | end = next_info->offset + next_info->bytes; |
1585 | 1634 | ||
@@ -1599,20 +1648,20 @@ again: | |||
1599 | } | 1648 | } |
1600 | 1649 | ||
1601 | if (info->bytes == bytes) { | 1650 | if (info->bytes == bytes) { |
1602 | unlink_free_space(block_group, info); | 1651 | unlink_free_space(ctl, info); |
1603 | if (info->bitmap) { | 1652 | if (info->bitmap) { |
1604 | kfree(info->bitmap); | 1653 | kfree(info->bitmap); |
1605 | block_group->total_bitmaps--; | 1654 | ctl->total_bitmaps--; |
1606 | } | 1655 | } |
1607 | kmem_cache_free(btrfs_free_space_cachep, info); | 1656 | kmem_cache_free(btrfs_free_space_cachep, info); |
1608 | goto out_lock; | 1657 | goto out_lock; |
1609 | } | 1658 | } |
1610 | 1659 | ||
1611 | if (!info->bitmap && info->offset == offset) { | 1660 | if (!info->bitmap && info->offset == offset) { |
1612 | unlink_free_space(block_group, info); | 1661 | unlink_free_space(ctl, info); |
1613 | info->offset += bytes; | 1662 | info->offset += bytes; |
1614 | info->bytes -= bytes; | 1663 | info->bytes -= bytes; |
1615 | link_free_space(block_group, info); | 1664 | link_free_space(ctl, info); |
1616 | goto out_lock; | 1665 | goto out_lock; |
1617 | } | 1666 | } |
1618 | 1667 | ||
@@ -1626,13 +1675,13 @@ again: | |||
1626 | * first unlink the old info and then | 1675 | * first unlink the old info and then |
1627 | * insert it again after the hole we're creating | 1676 | * insert it again after the hole we're creating |
1628 | */ | 1677 | */ |
1629 | unlink_free_space(block_group, info); | 1678 | unlink_free_space(ctl, info); |
1630 | if (offset + bytes < info->offset + info->bytes) { | 1679 | if (offset + bytes < info->offset + info->bytes) { |
1631 | u64 old_end = info->offset + info->bytes; | 1680 | u64 old_end = info->offset + info->bytes; |
1632 | 1681 | ||
1633 | info->offset = offset + bytes; | 1682 | info->offset = offset + bytes; |
1634 | info->bytes = old_end - info->offset; | 1683 | info->bytes = old_end - info->offset; |
1635 | ret = link_free_space(block_group, info); | 1684 | ret = link_free_space(ctl, info); |
1636 | WARN_ON(ret); | 1685 | WARN_ON(ret); |
1637 | if (ret) | 1686 | if (ret) |
1638 | goto out_lock; | 1687 | goto out_lock; |
@@ -1642,7 +1691,7 @@ again: | |||
1642 | */ | 1691 | */ |
1643 | kmem_cache_free(btrfs_free_space_cachep, info); | 1692 | kmem_cache_free(btrfs_free_space_cachep, info); |
1644 | } | 1693 | } |
1645 | spin_unlock(&block_group->tree_lock); | 1694 | spin_unlock(&ctl->tree_lock); |
1646 | 1695 | ||
1647 | /* step two, insert a new info struct to cover | 1696 | /* step two, insert a new info struct to cover |
1648 | * anything before the hole | 1697 | * anything before the hole |
@@ -1653,12 +1702,12 @@ again: | |||
1653 | goto out; | 1702 | goto out; |
1654 | } | 1703 | } |
1655 | 1704 | ||
1656 | ret = remove_from_bitmap(block_group, info, &offset, &bytes); | 1705 | ret = remove_from_bitmap(ctl, info, &offset, &bytes); |
1657 | if (ret == -EAGAIN) | 1706 | if (ret == -EAGAIN) |
1658 | goto again; | 1707 | goto again; |
1659 | BUG_ON(ret); | 1708 | BUG_ON(ret); |
1660 | out_lock: | 1709 | out_lock: |
1661 | spin_unlock(&block_group->tree_lock); | 1710 | spin_unlock(&ctl->tree_lock); |
1662 | out: | 1711 | out: |
1663 | return ret; | 1712 | return ret; |
1664 | } | 1713 | } |
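
In the middle of btrfs_remove_free_space(), removing a range from the interior of a single extent entry splits it: the entry is unlinked, the piece after the hole is shrunk and re-linked in place, and "step two" re-adds whatever sat before the hole. A sketch of just the split arithmetic, with made-up values:

#include <stdint.h>
#include <stdio.h>

struct span { uint64_t offset, bytes; };

/* split 'info' around the hole [offset, offset + bytes); returns how
 * many pieces survive (0, 1 or 2) and writes them to out[] */
static int punch_hole(struct span info, uint64_t offset, uint64_t bytes,
                      struct span out[2])
{
        int n = 0;
        uint64_t old_end = info.offset + info.bytes;

        if (info.offset < offset)                    /* piece before hole */
                out[n++] = (struct span){ info.offset, offset - info.offset };
        if (offset + bytes < old_end)                /* piece after hole  */
                out[n++] = (struct span){ offset + bytes,
                                          old_end - (offset + bytes) };
        return n;
}

int main(void)
{
        struct span out[2];
        int i, n = punch_hole((struct span){ 0, 1 << 20 }, 4096, 8192, out);

        for (i = 0; i < n; i++)
                printf("piece %d: offset=%llu bytes=%llu\n", i,
                       (unsigned long long)out[i].offset,
                       (unsigned long long)out[i].bytes);
        return 0;
}
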
@@ -1666,11 +1715,12 @@ out: | |||
1666 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | 1715 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
1667 | u64 bytes) | 1716 | u64 bytes) |
1668 | { | 1717 | { |
1718 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1669 | struct btrfs_free_space *info; | 1719 | struct btrfs_free_space *info; |
1670 | struct rb_node *n; | 1720 | struct rb_node *n; |
1671 | int count = 0; | 1721 | int count = 0; |
1672 | 1722 | ||
1673 | for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { | 1723 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { |
1674 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 1724 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
1675 | if (info->bytes >= bytes) | 1725 | if (info->bytes >= bytes) |
1676 | count++; | 1726 | count++; |
@@ -1685,19 +1735,28 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | |||
1685 | "\n", count); | 1735 | "\n", count); |
1686 | } | 1736 | } |
1687 | 1737 | ||
1688 | u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) | 1738 | static struct btrfs_free_space_op free_space_op = { |
1739 | .recalc_thresholds = recalculate_thresholds, | ||
1740 | .use_bitmap = use_bitmap, | ||
1741 | }; | ||
1742 | |||
1743 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) | ||
1689 | { | 1744 | { |
1690 | struct btrfs_free_space *info; | 1745 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
1691 | struct rb_node *n; | ||
1692 | u64 ret = 0; | ||
1693 | 1746 | ||
1694 | for (n = rb_first(&block_group->free_space_offset); n; | 1747 | spin_lock_init(&ctl->tree_lock); |
1695 | n = rb_next(n)) { | 1748 | ctl->unit = block_group->sectorsize; |
1696 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 1749 | ctl->start = block_group->key.objectid; |
1697 | ret += info->bytes; | 1750 | ctl->private = block_group; |
1698 | } | 1751 | ctl->op = &free_space_op; |
1699 | 1752 | ||
1700 | return ret; | 1753 | /* |
1754 | * we only want to have 32k of ram per block group for keeping | ||
1755 | * track of free space, and if we pass 1/2 of that we want to | ||
1756 | * start converting things over to using bitmaps | ||
1757 | */ | ||
1758 | ctl->extents_thresh = ((1024 * 32) / 2) / | ||
1759 | sizeof(struct btrfs_free_space); | ||
1701 | } | 1760 | } |
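
btrfs_init_free_space_ctl() replaces the old per-block-group field setup and wires in the free_space_op table defined just above it. The extents_thresh line can be checked by hand: half of the 32 KiB budget divided by sizeof(struct btrfs_free_space). A standalone check with a stand-in struct that only approximates the kernel layout, so the printed count (roughly 340 on a typical 64-bit build) is illustrative:

#include <stdint.h>
#include <stdio.h>

/* stand-in with the same fields as the kernel's struct btrfs_free_space;
 * the real size is architecture dependent */
struct free_space {
        struct { void *left, *right, *parent; } rb;   /* rb_node-ish */
        uint64_t offset;
        uint64_t bytes;
        void *bitmap;
};

int main(void)
{
        size_t thresh = ((1024 * 32) / 2) / sizeof(struct free_space);

        printf("extent entries before bitmaps kick in: %zu\n", thresh);
        return 0;
}
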
1702 | 1761 | ||
1703 | /* | 1762 | /* |
@@ -1711,6 +1770,7 @@ __btrfs_return_cluster_to_free_space( | |||
1711 | struct btrfs_block_group_cache *block_group, | 1770 | struct btrfs_block_group_cache *block_group, |
1712 | struct btrfs_free_cluster *cluster) | 1771 | struct btrfs_free_cluster *cluster) |
1713 | { | 1772 | { |
1773 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1714 | struct btrfs_free_space *entry; | 1774 | struct btrfs_free_space *entry; |
1715 | struct rb_node *node; | 1775 | struct rb_node *node; |
1716 | 1776 | ||
@@ -1732,8 +1792,8 @@ __btrfs_return_cluster_to_free_space( | |||
1732 | 1792 | ||
1733 | bitmap = (entry->bitmap != NULL); | 1793 | bitmap = (entry->bitmap != NULL); |
1734 | if (!bitmap) | 1794 | if (!bitmap) |
1735 | try_merge_free_space(block_group, entry, false); | 1795 | try_merge_free_space(ctl, entry, false); |
1736 | tree_insert_offset(&block_group->free_space_offset, | 1796 | tree_insert_offset(&ctl->free_space_offset, |
1737 | entry->offset, &entry->offset_index, bitmap); | 1797 | entry->offset, &entry->offset_index, bitmap); |
1738 | } | 1798 | } |
1739 | cluster->root = RB_ROOT; | 1799 | cluster->root = RB_ROOT; |
@@ -1744,14 +1804,38 @@ out: | |||
1744 | return 0; | 1804 | return 0; |
1745 | } | 1805 | } |
1746 | 1806 | ||
1747 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | 1807 | void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) |
1748 | { | 1808 | { |
1749 | struct btrfs_free_space *info; | 1809 | struct btrfs_free_space *info; |
1750 | struct rb_node *node; | 1810 | struct rb_node *node; |
1811 | |||
1812 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { | ||
1813 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1814 | unlink_free_space(ctl, info); | ||
1815 | kfree(info->bitmap); | ||
1816 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1817 | if (need_resched()) { | ||
1818 | spin_unlock(&ctl->tree_lock); | ||
1819 | cond_resched(); | ||
1820 | spin_lock(&ctl->tree_lock); | ||
1821 | } | ||
1822 | } | ||
1823 | } | ||
1824 | |||
1825 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) | ||
1826 | { | ||
1827 | spin_lock(&ctl->tree_lock); | ||
1828 | __btrfs_remove_free_space_cache_locked(ctl); | ||
1829 | spin_unlock(&ctl->tree_lock); | ||
1830 | } | ||
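
__btrfs_remove_free_space_cache_locked() drains the tree with the same lock-cycling idiom the cluster loop below it keeps using: free entries under the spinlock, and whenever the scheduler wants the CPU back, drop the lock, cond_resched(), and retake it. A userspace analogue of the shape of that pattern, with a mutex and sched_yield() standing in for the kernel primitives (compile with -pthread; the batch size is invented):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
        long i;

        pthread_mutex_lock(&lock);
        for (i = 0; i < 1000000; i++) {
                /* ... free one cache entry while holding the lock ... */
                if (i && i % 4096 == 0) {     /* stand-in for need_resched() */
                        pthread_mutex_unlock(&lock);
                        sched_yield();        /* stand-in for cond_resched() */
                        pthread_mutex_lock(&lock);
                }
        }
        pthread_mutex_unlock(&lock);
        puts("cache drained");
        return 0;
}
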
1831 | |||
1832 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | ||
1833 | { | ||
1834 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1751 | struct btrfs_free_cluster *cluster; | 1835 | struct btrfs_free_cluster *cluster; |
1752 | struct list_head *head; | 1836 | struct list_head *head; |
1753 | 1837 | ||
1754 | spin_lock(&block_group->tree_lock); | 1838 | spin_lock(&ctl->tree_lock); |
1755 | while ((head = block_group->cluster_list.next) != | 1839 | while ((head = block_group->cluster_list.next) != |
1756 | &block_group->cluster_list) { | 1840 | &block_group->cluster_list) { |
1757 | cluster = list_entry(head, struct btrfs_free_cluster, | 1841 | cluster = list_entry(head, struct btrfs_free_cluster, |
@@ -1760,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
1760 | WARN_ON(cluster->block_group != block_group); | 1844 | WARN_ON(cluster->block_group != block_group); |
1761 | __btrfs_return_cluster_to_free_space(block_group, cluster); | 1845 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
1762 | if (need_resched()) { | 1846 | if (need_resched()) { |
1763 | spin_unlock(&block_group->tree_lock); | 1847 | spin_unlock(&ctl->tree_lock); |
1764 | cond_resched(); | 1848 | cond_resched(); |
1765 | spin_lock(&block_group->tree_lock); | 1849 | spin_lock(&ctl->tree_lock); |
1766 | } | 1850 | } |
1767 | } | 1851 | } |
1852 | __btrfs_remove_free_space_cache_locked(ctl); | ||
1853 | spin_unlock(&ctl->tree_lock); | ||
1768 | 1854 | ||
1769 | while ((node = rb_last(&block_group->free_space_offset)) != NULL) { | ||
1770 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1771 | if (!info->bitmap) { | ||
1772 | unlink_free_space(block_group, info); | ||
1773 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1774 | } else { | ||
1775 | free_bitmap(block_group, info); | ||
1776 | } | ||
1777 | |||
1778 | if (need_resched()) { | ||
1779 | spin_unlock(&block_group->tree_lock); | ||
1780 | cond_resched(); | ||
1781 | spin_lock(&block_group->tree_lock); | ||
1782 | } | ||
1783 | } | ||
1784 | |||
1785 | spin_unlock(&block_group->tree_lock); | ||
1786 | } | 1855 | } |
1787 | 1856 | ||
1788 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 1857 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
1789 | u64 offset, u64 bytes, u64 empty_size) | 1858 | u64 offset, u64 bytes, u64 empty_size) |
1790 | { | 1859 | { |
1860 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1791 | struct btrfs_free_space *entry = NULL; | 1861 | struct btrfs_free_space *entry = NULL; |
1792 | u64 bytes_search = bytes + empty_size; | 1862 | u64 bytes_search = bytes + empty_size; |
1793 | u64 ret = 0; | 1863 | u64 ret = 0; |
1794 | 1864 | ||
1795 | spin_lock(&block_group->tree_lock); | 1865 | spin_lock(&ctl->tree_lock); |
1796 | entry = find_free_space(block_group, &offset, &bytes_search, 0); | 1866 | entry = find_free_space(ctl, &offset, &bytes_search); |
1797 | if (!entry) | 1867 | if (!entry) |
1798 | goto out; | 1868 | goto out; |
1799 | 1869 | ||
1800 | ret = offset; | 1870 | ret = offset; |
1801 | if (entry->bitmap) { | 1871 | if (entry->bitmap) { |
1802 | bitmap_clear_bits(block_group, entry, offset, bytes); | 1872 | bitmap_clear_bits(ctl, entry, offset, bytes); |
1803 | if (!entry->bytes) | 1873 | if (!entry->bytes) |
1804 | free_bitmap(block_group, entry); | 1874 | free_bitmap(ctl, entry); |
1805 | } else { | 1875 | } else { |
1806 | unlink_free_space(block_group, entry); | 1876 | unlink_free_space(ctl, entry); |
1807 | entry->offset += bytes; | 1877 | entry->offset += bytes; |
1808 | entry->bytes -= bytes; | 1878 | entry->bytes -= bytes; |
1809 | if (!entry->bytes) | 1879 | if (!entry->bytes) |
1810 | kmem_cache_free(btrfs_free_space_cachep, entry); | 1880 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1811 | else | 1881 | else |
1812 | link_free_space(block_group, entry); | 1882 | link_free_space(ctl, entry); |
1813 | } | 1883 | } |
1814 | 1884 | ||
1815 | out: | 1885 | out: |
1816 | spin_unlock(&block_group->tree_lock); | 1886 | spin_unlock(&ctl->tree_lock); |
1817 | 1887 | ||
1818 | return ret; | 1888 | return ret; |
1819 | } | 1889 | } |
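
btrfs_find_space_for_alloc() carves the request off the front of whatever entry find_free_space() returns: bitmap entries get their bits cleared, while an extent entry is unlinked, advanced past the allocation, and re-linked (or freed once empty). The extent case is plain arithmetic, sketched here with invented values:

#include <stdint.h>
#include <stdio.h>

struct span { uint64_t offset, bytes; };

/* carve an allocation off the front of a free extent entry */
static uint64_t alloc_front(struct span *e, uint64_t bytes)
{
        uint64_t ret = e->offset;

        e->offset += bytes;
        e->bytes  -= bytes;
        return ret;
}

int main(void)
{
        struct span entry = { 1 << 20, 64 * 1024 };  /* 64K free at 1M */
        uint64_t got = alloc_front(&entry, 16 * 1024);

        printf("allocated at %llu, %llu bytes left at %llu\n",
               (unsigned long long)got,
               (unsigned long long)entry.bytes,
               (unsigned long long)entry.offset);
        return 0;
}
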
@@ -1830,6 +1900,7 @@ int btrfs_return_cluster_to_free_space( | |||
1830 | struct btrfs_block_group_cache *block_group, | 1900 | struct btrfs_block_group_cache *block_group, |
1831 | struct btrfs_free_cluster *cluster) | 1901 | struct btrfs_free_cluster *cluster) |
1832 | { | 1902 | { |
1903 | struct btrfs_free_space_ctl *ctl; | ||
1833 | int ret; | 1904 | int ret; |
1834 | 1905 | ||
1835 | /* first, get a safe pointer to the block group */ | 1906 | /* first, get a safe pointer to the block group */ |
@@ -1848,10 +1919,12 @@ int btrfs_return_cluster_to_free_space( | |||
1848 | atomic_inc(&block_group->count); | 1919 | atomic_inc(&block_group->count); |
1849 | spin_unlock(&cluster->lock); | 1920 | spin_unlock(&cluster->lock); |
1850 | 1921 | ||
1922 | ctl = block_group->free_space_ctl; | ||
1923 | |||
1851 | /* now return any extents the cluster had on it */ | 1924 | /* now return any extents the cluster had on it */ |
1852 | spin_lock(&block_group->tree_lock); | 1925 | spin_lock(&ctl->tree_lock); |
1853 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); | 1926 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); |
1854 | spin_unlock(&block_group->tree_lock); | 1927 | spin_unlock(&ctl->tree_lock); |
1855 | 1928 | ||
1856 | /* finally drop our ref */ | 1929 | /* finally drop our ref */ |
1857 | btrfs_put_block_group(block_group); | 1930 | btrfs_put_block_group(block_group); |
@@ -1863,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1863 | struct btrfs_free_space *entry, | 1936 | struct btrfs_free_space *entry, |
1864 | u64 bytes, u64 min_start) | 1937 | u64 bytes, u64 min_start) |
1865 | { | 1938 | { |
1939 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1866 | int err; | 1940 | int err; |
1867 | u64 search_start = cluster->window_start; | 1941 | u64 search_start = cluster->window_start; |
1868 | u64 search_bytes = bytes; | 1942 | u64 search_bytes = bytes; |
@@ -1871,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1871 | search_start = min_start; | 1945 | search_start = min_start; |
1872 | search_bytes = bytes; | 1946 | search_bytes = bytes; |
1873 | 1947 | ||
1874 | err = search_bitmap(block_group, entry, &search_start, | 1948 | err = search_bitmap(ctl, entry, &search_start, &search_bytes); |
1875 | &search_bytes); | ||
1876 | if (err) | 1949 | if (err) |
1877 | return 0; | 1950 | return 0; |
1878 | 1951 | ||
1879 | ret = search_start; | 1952 | ret = search_start; |
1880 | bitmap_clear_bits(block_group, entry, ret, bytes); | 1953 | bitmap_clear_bits(ctl, entry, ret, bytes); |
1881 | 1954 | ||
1882 | return ret; | 1955 | return ret; |
1883 | } | 1956 | } |
@@ -1891,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1891 | struct btrfs_free_cluster *cluster, u64 bytes, | 1964 | struct btrfs_free_cluster *cluster, u64 bytes, |
1892 | u64 min_start) | 1965 | u64 min_start) |
1893 | { | 1966 | { |
1967 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1894 | struct btrfs_free_space *entry = NULL; | 1968 | struct btrfs_free_space *entry = NULL; |
1895 | struct rb_node *node; | 1969 | struct rb_node *node; |
1896 | u64 ret = 0; | 1970 | u64 ret = 0; |
@@ -1951,20 +2025,20 @@ out: | |||
1951 | if (!ret) | 2025 | if (!ret) |
1952 | return 0; | 2026 | return 0; |
1953 | 2027 | ||
1954 | spin_lock(&block_group->tree_lock); | 2028 | spin_lock(&ctl->tree_lock); |
1955 | 2029 | ||
1956 | block_group->free_space -= bytes; | 2030 | ctl->free_space -= bytes; |
1957 | if (entry->bytes == 0) { | 2031 | if (entry->bytes == 0) { |
1958 | block_group->free_extents--; | 2032 | ctl->free_extents--; |
1959 | if (entry->bitmap) { | 2033 | if (entry->bitmap) { |
1960 | kfree(entry->bitmap); | 2034 | kfree(entry->bitmap); |
1961 | block_group->total_bitmaps--; | 2035 | ctl->total_bitmaps--; |
1962 | recalculate_thresholds(block_group); | 2036 | ctl->op->recalc_thresholds(ctl); |
1963 | } | 2037 | } |
1964 | kmem_cache_free(btrfs_free_space_cachep, entry); | 2038 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1965 | } | 2039 | } |
1966 | 2040 | ||
1967 | spin_unlock(&block_group->tree_lock); | 2041 | spin_unlock(&ctl->tree_lock); |
1968 | 2042 | ||
1969 | return ret; | 2043 | return ret; |
1970 | } | 2044 | } |
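
Note that the bookkeeping above now goes through ctl->op->recalc_thresholds() instead of calling the block-group helper directly; the btrfs_free_space_op indirection is presumably what lets the free-ino cache further down reuse this file with its own policy (its op table lives outside this diff). The pattern, reduced to its C essentials with illustrative names:

#include <stdio.h>

struct ctl;

struct free_space_op {
        void (*recalc_thresholds)(struct ctl *ctl);
};

struct ctl {
        const struct free_space_op *op;
        unsigned int total_bitmaps;
};

static void block_group_recalc(struct ctl *ctl)
{
        printf("block-group policy, %u bitmaps\n", ctl->total_bitmaps);
}

static void free_ino_recalc(struct ctl *ctl)
{
        printf("free-ino policy, %u bitmaps\n", ctl->total_bitmaps);
}

static const struct free_space_op bg_ops  = { block_group_recalc };
static const struct free_space_op ino_ops = { free_ino_recalc };

int main(void)
{
        struct ctl bg = { &bg_ops, 2 }, ino = { &ino_ops, 0 };

        bg.op->recalc_thresholds(&bg);   /* dispatches per-user policy */
        ino.op->recalc_thresholds(&ino);
        return 0;
}
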
@@ -1974,6 +2048,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
1974 | struct btrfs_free_cluster *cluster, | 2048 | struct btrfs_free_cluster *cluster, |
1975 | u64 offset, u64 bytes, u64 min_bytes) | 2049 | u64 offset, u64 bytes, u64 min_bytes) |
1976 | { | 2050 | { |
2051 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1977 | unsigned long next_zero; | 2052 | unsigned long next_zero; |
1978 | unsigned long i; | 2053 | unsigned long i; |
1979 | unsigned long search_bits; | 2054 | unsigned long search_bits; |
@@ -2028,7 +2103,7 @@ again: | |||
2028 | 2103 | ||
2029 | cluster->window_start = start * block_group->sectorsize + | 2104 | cluster->window_start = start * block_group->sectorsize + |
2030 | entry->offset; | 2105 | entry->offset; |
2031 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2106 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2032 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2107 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2033 | &entry->offset_index, 1); | 2108 | &entry->offset_index, 1); |
2034 | BUG_ON(ret); | 2109 | BUG_ON(ret); |
@@ -2043,6 +2118,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2043 | struct btrfs_free_cluster *cluster, | 2118 | struct btrfs_free_cluster *cluster, |
2044 | u64 offset, u64 bytes, u64 min_bytes) | 2119 | u64 offset, u64 bytes, u64 min_bytes) |
2045 | { | 2120 | { |
2121 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2046 | struct btrfs_free_space *first = NULL; | 2122 | struct btrfs_free_space *first = NULL; |
2047 | struct btrfs_free_space *entry = NULL; | 2123 | struct btrfs_free_space *entry = NULL; |
2048 | struct btrfs_free_space *prev = NULL; | 2124 | struct btrfs_free_space *prev = NULL; |
@@ -2053,7 +2129,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2053 | u64 max_extent; | 2129 | u64 max_extent; |
2054 | u64 max_gap = 128 * 1024; | 2130 | u64 max_gap = 128 * 1024; |
2055 | 2131 | ||
2056 | entry = tree_search_offset(block_group, offset, 0, 1); | 2132 | entry = tree_search_offset(ctl, offset, 0, 1); |
2057 | if (!entry) | 2133 | if (!entry) |
2058 | return -ENOSPC; | 2134 | return -ENOSPC; |
2059 | 2135 | ||
@@ -2119,7 +2195,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2119 | if (entry->bitmap) | 2195 | if (entry->bitmap) |
2120 | continue; | 2196 | continue; |
2121 | 2197 | ||
2122 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2198 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2123 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2199 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2124 | &entry->offset_index, 0); | 2200 | &entry->offset_index, 0); |
2125 | BUG_ON(ret); | 2201 | BUG_ON(ret); |
@@ -2138,16 +2214,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2138 | struct btrfs_free_cluster *cluster, | 2214 | struct btrfs_free_cluster *cluster, |
2139 | u64 offset, u64 bytes, u64 min_bytes) | 2215 | u64 offset, u64 bytes, u64 min_bytes) |
2140 | { | 2216 | { |
2217 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2141 | struct btrfs_free_space *entry; | 2218 | struct btrfs_free_space *entry; |
2142 | struct rb_node *node; | 2219 | struct rb_node *node; |
2143 | int ret = -ENOSPC; | 2220 | int ret = -ENOSPC; |
2144 | 2221 | ||
2145 | if (block_group->total_bitmaps == 0) | 2222 | if (ctl->total_bitmaps == 0) |
2146 | return -ENOSPC; | 2223 | return -ENOSPC; |
2147 | 2224 | ||
2148 | entry = tree_search_offset(block_group, | 2225 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); |
2149 | offset_to_bitmap(block_group, offset), | ||
2150 | 0, 1); | ||
2151 | if (!entry) | 2226 | if (!entry) |
2152 | return -ENOSPC; | 2227 | return -ENOSPC; |
2153 | 2228 | ||
@@ -2180,6 +2255,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2180 | struct btrfs_free_cluster *cluster, | 2255 | struct btrfs_free_cluster *cluster, |
2181 | u64 offset, u64 bytes, u64 empty_size) | 2256 | u64 offset, u64 bytes, u64 empty_size) |
2182 | { | 2257 | { |
2258 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2183 | u64 min_bytes; | 2259 | u64 min_bytes; |
2184 | int ret; | 2260 | int ret; |
2185 | 2261 | ||
@@ -2199,14 +2275,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2199 | } else | 2275 | } else |
2200 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | 2276 | min_bytes = max(bytes, (bytes + empty_size) >> 2); |
2201 | 2277 | ||
2202 | spin_lock(&block_group->tree_lock); | 2278 | spin_lock(&ctl->tree_lock); |
2203 | 2279 | ||
2204 | /* | 2280 | /* |
2205 | * If we know we don't have enough space to make a cluster don't even | 2281 | * If we know we don't have enough space to make a cluster don't even |
2206 | * bother doing all the work to try and find one. | 2282 | * bother doing all the work to try and find one. |
2207 | */ | 2283 | */ |
2208 | if (block_group->free_space < min_bytes) { | 2284 | if (ctl->free_space < min_bytes) { |
2209 | spin_unlock(&block_group->tree_lock); | 2285 | spin_unlock(&ctl->tree_lock); |
2210 | return -ENOSPC; | 2286 | return -ENOSPC; |
2211 | } | 2287 | } |
2212 | 2288 | ||
@@ -2232,7 +2308,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2232 | } | 2308 | } |
2233 | out: | 2309 | out: |
2234 | spin_unlock(&cluster->lock); | 2310 | spin_unlock(&cluster->lock); |
2235 | spin_unlock(&block_group->tree_lock); | 2311 | spin_unlock(&ctl->tree_lock); |
2236 | 2312 | ||
2237 | return ret; | 2313 | return ret; |
2238 | } | 2314 | } |
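
The min_bytes fallback shown in context (only the locking around it changed in this patch) refuses to even search unless a window of max(bytes, (bytes + empty_size) >> 2) exists. Worked through for a hypothetical 1 MiB request with 3 MiB of empty_size, it comes out to exactly 1 MiB:

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the fallback branch of btrfs_find_space_cluster() */
static uint64_t min_bytes(uint64_t bytes, uint64_t empty_size)
{
        uint64_t quarter = (bytes + empty_size) >> 2;

        return bytes > quarter ? bytes : quarter;
}

int main(void)
{
        uint64_t bytes = 1 << 20, empty = 3 << 20;

        printf("min_bytes = %llu\n",
               (unsigned long long)min_bytes(bytes, empty));  /* 1048576 */
        return 0;
}
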
@@ -2253,6 +2329,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
2253 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | 2329 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, |
2254 | u64 *trimmed, u64 start, u64 end, u64 minlen) | 2330 | u64 *trimmed, u64 start, u64 end, u64 minlen) |
2255 | { | 2331 | { |
2332 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2256 | struct btrfs_free_space *entry = NULL; | 2333 | struct btrfs_free_space *entry = NULL; |
2257 | struct btrfs_fs_info *fs_info = block_group->fs_info; | 2334 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
2258 | u64 bytes = 0; | 2335 | u64 bytes = 0; |
@@ -2262,52 +2339,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2262 | *trimmed = 0; | 2339 | *trimmed = 0; |
2263 | 2340 | ||
2264 | while (start < end) { | 2341 | while (start < end) { |
2265 | spin_lock(&block_group->tree_lock); | 2342 | spin_lock(&ctl->tree_lock); |
2266 | 2343 | ||
2267 | if (block_group->free_space < minlen) { | 2344 | if (ctl->free_space < minlen) { |
2268 | spin_unlock(&block_group->tree_lock); | 2345 | spin_unlock(&ctl->tree_lock); |
2269 | break; | 2346 | break; |
2270 | } | 2347 | } |
2271 | 2348 | ||
2272 | entry = tree_search_offset(block_group, start, 0, 1); | 2349 | entry = tree_search_offset(ctl, start, 0, 1); |
2273 | if (!entry) | 2350 | if (!entry) |
2274 | entry = tree_search_offset(block_group, | 2351 | entry = tree_search_offset(ctl, |
2275 | offset_to_bitmap(block_group, | 2352 | offset_to_bitmap(ctl, start), |
2276 | start), | ||
2277 | 1, 1); | 2353 | 1, 1); |
2278 | 2354 | ||
2279 | if (!entry || entry->offset >= end) { | 2355 | if (!entry || entry->offset >= end) { |
2280 | spin_unlock(&block_group->tree_lock); | 2356 | spin_unlock(&ctl->tree_lock); |
2281 | break; | 2357 | break; |
2282 | } | 2358 | } |
2283 | 2359 | ||
2284 | if (entry->bitmap) { | 2360 | if (entry->bitmap) { |
2285 | ret = search_bitmap(block_group, entry, &start, &bytes); | 2361 | ret = search_bitmap(ctl, entry, &start, &bytes); |
2286 | if (!ret) { | 2362 | if (!ret) { |
2287 | if (start >= end) { | 2363 | if (start >= end) { |
2288 | spin_unlock(&block_group->tree_lock); | 2364 | spin_unlock(&ctl->tree_lock); |
2289 | break; | 2365 | break; |
2290 | } | 2366 | } |
2291 | bytes = min(bytes, end - start); | 2367 | bytes = min(bytes, end - start); |
2292 | bitmap_clear_bits(block_group, entry, | 2368 | bitmap_clear_bits(ctl, entry, start, bytes); |
2293 | start, bytes); | ||
2294 | if (entry->bytes == 0) | 2369 | if (entry->bytes == 0) |
2295 | free_bitmap(block_group, entry); | 2370 | free_bitmap(ctl, entry); |
2296 | } else { | 2371 | } else { |
2297 | start = entry->offset + BITS_PER_BITMAP * | 2372 | start = entry->offset + BITS_PER_BITMAP * |
2298 | block_group->sectorsize; | 2373 | block_group->sectorsize; |
2299 | spin_unlock(&block_group->tree_lock); | 2374 | spin_unlock(&ctl->tree_lock); |
2300 | ret = 0; | 2375 | ret = 0; |
2301 | continue; | 2376 | continue; |
2302 | } | 2377 | } |
2303 | } else { | 2378 | } else { |
2304 | start = entry->offset; | 2379 | start = entry->offset; |
2305 | bytes = min(entry->bytes, end - start); | 2380 | bytes = min(entry->bytes, end - start); |
2306 | unlink_free_space(block_group, entry); | 2381 | unlink_free_space(ctl, entry); |
2307 | kmem_cache_free(btrfs_free_space_cachep, entry); | 2382 | kmem_cache_free(btrfs_free_space_cachep, entry); |
2308 | } | 2383 | } |
2309 | 2384 | ||
2310 | spin_unlock(&block_group->tree_lock); | 2385 | spin_unlock(&ctl->tree_lock); |
2311 | 2386 | ||
2312 | if (bytes >= minlen) { | 2387 | if (bytes >= minlen) { |
2313 | int update_ret; | 2388 | int update_ret; |
@@ -2319,8 +2394,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2319 | bytes, | 2394 | bytes, |
2320 | &actually_trimmed); | 2395 | &actually_trimmed); |
2321 | 2396 | ||
2322 | btrfs_add_free_space(block_group, | 2397 | btrfs_add_free_space(block_group, start, bytes); |
2323 | start, bytes); | ||
2324 | if (!update_ret) | 2398 | if (!update_ret) |
2325 | btrfs_update_reserved_bytes(block_group, | 2399 | btrfs_update_reserved_bytes(block_group, |
2326 | bytes, 0, 1); | 2400 | bytes, 0, 1); |
@@ -2342,3 +2416,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2342 | 2416 | ||
2343 | return ret; | 2417 | return ret; |
2344 | } | 2418 | } |
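
btrfs_trim_block_group() repeatedly pulls a chunk out of the cache under the lock, unlocks, issues the discard, and then gives the space back through btrfs_add_free_space(), accumulating *trimmed as it goes. A toy single-extent walk-through of that shape (the discard() helper below is an assumption standing in for the elided btrfs discard call):

#include <stdint.h>
#include <stdio.h>

struct span { uint64_t offset, bytes; };

static int discard(uint64_t start, uint64_t len, uint64_t *actually)
{
        *actually = len;        /* pretend the device trimmed everything */
        return 0;
}

int main(void)
{
        struct span cache = { 0, 1 << 20 };    /* one free extent */
        uint64_t start = 0, end = 1 << 20, minlen = 4096, trimmed = 0;

        while (start < end && cache.bytes) {
                uint64_t bytes = cache.bytes < end - start ?
                                 cache.bytes : end - start;
                uint64_t done;

                cache.bytes = 0;               /* unlink from the cache   */
                if (bytes >= minlen && !discard(start, bytes, &done))
                        trimmed += done;
                cache.bytes = bytes;           /* re-add, as the kernel does */
                start += bytes;
        }
        printf("trimmed %llu bytes\n", (unsigned long long)trimmed);
        return 0;
}
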
2419 | |||
2420 | /* | ||
2421 | * Find the left-most item in the cache tree, and then return the | ||
2422 | * smallest inode number in the item. | ||
2423 | * | ||
2424 | * Note: the returned inode number may not be the smallest one in | ||
2425 | * the tree, if the left-most item is a bitmap. | ||
2426 | */ | ||
2427 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) | ||
2428 | { | ||
2429 | struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; | ||
2430 | struct btrfs_free_space *entry = NULL; | ||
2431 | u64 ino = 0; | ||
2432 | |||
2433 | spin_lock(&ctl->tree_lock); | ||
2434 | |||
2435 | if (RB_EMPTY_ROOT(&ctl->free_space_offset)) | ||
2436 | goto out; | ||
2437 | |||
2438 | entry = rb_entry(rb_first(&ctl->free_space_offset), | ||
2439 | struct btrfs_free_space, offset_index); | ||
2440 | |||
2441 | if (!entry->bitmap) { | ||
2442 | ino = entry->offset; | ||
2443 | |||
2444 | unlink_free_space(ctl, entry); | ||
2445 | entry->offset++; | ||
2446 | entry->bytes--; | ||
2447 | if (!entry->bytes) | ||
2448 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2449 | else | ||
2450 | link_free_space(ctl, entry); | ||
2451 | } else { | ||
2452 | u64 offset = 0; | ||
2453 | u64 count = 1; | ||
2454 | int ret; | ||
2455 | |||
2456 | ret = search_bitmap(ctl, entry, &offset, &count); | ||
2457 | BUG_ON(ret); | ||
2458 | |||
2459 | ino = offset; | ||
2460 | bitmap_clear_bits(ctl, entry, offset, 1); | ||
2461 | if (entry->bytes == 0) | ||
2462 | free_bitmap(ctl, entry); | ||
2463 | } | ||
2464 | out: | ||
2465 | spin_unlock(&ctl->tree_lock); | ||
2466 | |||
2467 | return ino; | ||
2468 | } | ||
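
btrfs_find_ino_for_alloc() is the payoff of the refactor: the same ctl machinery now hands out inode numbers, with a unit of one judging by the offset++/bytes-- arithmetic, so an entry's offset is the first free ino and its bytes count how many consecutive inos are free. Popping the lowest number is the same front-trim used for extents; a sketch:

#include <stdint.h>
#include <stdio.h>

/* free-ino entry: offset = first free inode number, bytes = how many
 * consecutive numbers are free (unit is 1, not sectorsize) */
struct span { uint64_t offset, bytes; };

static uint64_t pop_lowest_ino(struct span *e)
{
        uint64_t ino = e->offset;

        e->offset++;
        e->bytes--;
        return ino;
}

int main(void)
{
        struct span free_inos = { 256, 3 };    /* inos 256, 257, 258 free */

        while (free_inos.bytes)
                printf("allocated ino %llu\n",
                       (unsigned long long)pop_lowest_ino(&free_inos));
        return 0;
}
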
2469 | |||
2470 | struct inode *lookup_free_ino_inode(struct btrfs_root *root, | ||
2471 | struct btrfs_path *path) | ||
2472 | { | ||
2473 | struct inode *inode = NULL; | ||
2474 | |||
2475 | spin_lock(&root->cache_lock); | ||
2476 | if (root->cache_inode) | ||
2477 | inode = igrab(root->cache_inode); | ||
2478 | spin_unlock(&root->cache_lock); | ||
2479 | if (inode) | ||
2480 | return inode; | ||
2481 | |||
2482 | inode = __lookup_free_space_inode(root, path, 0); | ||
2483 | if (IS_ERR(inode)) | ||
2484 | return inode; | ||
2485 | |||
2486 | spin_lock(&root->cache_lock); | ||
2487 | if (!root->fs_info->closing) | ||
2488 | root->cache_inode = igrab(inode); | ||
2489 | spin_unlock(&root->cache_lock); | ||
2490 | |||
2491 | return inode; | ||
2492 | } | ||
2493 | |||
2494 | int create_free_ino_inode(struct btrfs_root *root, | ||
2495 | struct btrfs_trans_handle *trans, | ||
2496 | struct btrfs_path *path) | ||
2497 | { | ||
2498 | return __create_free_space_inode(root, trans, path, | ||
2499 | BTRFS_FREE_INO_OBJECTID, 0); | ||
2500 | } | ||
2501 | |||
2502 | int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | ||
2503 | { | ||
2504 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
2505 | struct btrfs_path *path; | ||
2506 | struct inode *inode; | ||
2507 | int ret = 0; | ||
2508 | u64 root_gen = btrfs_root_generation(&root->root_item); | ||
2509 | |||
2510 | /* | ||
2511 | * If we're unmounting then just return, since this does a search on the | ||
2512 | * normal root and not the commit root and we could deadlock. | ||
2513 | */ | ||
2514 | smp_mb(); | ||
2515 | if (fs_info->closing) | ||
2516 | return 0; | ||
2517 | |||
2518 | path = btrfs_alloc_path(); | ||
2519 | if (!path) | ||
2520 | return 0; | ||
2521 | |||
2522 | inode = lookup_free_ino_inode(root, path); | ||
2523 | if (IS_ERR(inode)) | ||
2524 | goto out; | ||
2525 | |||
2526 | if (root_gen != BTRFS_I(inode)->generation) | ||
2527 | goto out_put; | ||
2528 | |||
2529 | ret = __load_free_space_cache(root, inode, ctl, path, 0); | ||
2530 | |||
2531 | if (ret < 0) | ||
2532 | printk(KERN_ERR "btrfs: failed to load free ino cache for " | ||
2533 | "root %llu\n", root->root_key.objectid); | ||
2534 | out_put: | ||
2535 | iput(inode); | ||
2536 | out: | ||
2537 | btrfs_free_path(path); | ||
2538 | return ret; | ||
2539 | } | ||
2540 | |||
2541 | int btrfs_write_out_ino_cache(struct btrfs_root *root, | ||
2542 | struct btrfs_trans_handle *trans, | ||
2543 | struct btrfs_path *path) | ||
2544 | { | ||
2545 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
2546 | struct inode *inode; | ||
2547 | int ret; | ||
2548 | |||
2549 | inode = lookup_free_ino_inode(root, path); | ||
2550 | if (IS_ERR(inode)) | ||
2551 | return 0; | ||
2552 | |||
2553 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); | ||
2554 | if (ret < 0) | ||
2555 | printk(KERN_ERR "btrfs: failed to write free ino cache " | ||
2556 | "for root %llu\n", root->root_key.objectid); | ||
2557 | |||
2558 | iput(inode); | ||
2559 | return ret; | ||
2560 | } | ||