Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r-- | fs/btrfs/free-space-cache.c | 2158 |
1 file changed, 1743 insertions, 415 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f488fac04d99..bf0d61567f3d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -23,34 +23,937 @@ | |||
23 | #include "ctree.h" | 23 | #include "ctree.h" |
24 | #include "free-space-cache.h" | 24 | #include "free-space-cache.h" |
25 | #include "transaction.h" | 25 | #include "transaction.h" |
26 | #include "disk-io.h" | ||
27 | #include "extent_io.h" | ||
28 | #include "inode-map.h" | ||
26 | 29 | ||
27 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) | 30 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) |
28 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) | 31 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) |
29 | 32 | ||
30 | static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, | 33 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
34 | struct btrfs_free_space *info); | ||
35 | |||
36 | static struct inode *__lookup_free_space_inode(struct btrfs_root *root, | ||
37 | struct btrfs_path *path, | ||
38 | u64 offset) | ||
39 | { | ||
40 | struct btrfs_key key; | ||
41 | struct btrfs_key location; | ||
42 | struct btrfs_disk_key disk_key; | ||
43 | struct btrfs_free_space_header *header; | ||
44 | struct extent_buffer *leaf; | ||
45 | struct inode *inode = NULL; | ||
46 | int ret; | ||
47 | |||
48 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | ||
49 | key.offset = offset; | ||
50 | key.type = 0; | ||
51 | |||
52 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
53 | if (ret < 0) | ||
54 | return ERR_PTR(ret); | ||
55 | if (ret > 0) { | ||
56 | btrfs_release_path(path); | ||
57 | return ERR_PTR(-ENOENT); | ||
58 | } | ||
59 | |||
60 | leaf = path->nodes[0]; | ||
61 | header = btrfs_item_ptr(leaf, path->slots[0], | ||
62 | struct btrfs_free_space_header); | ||
63 | btrfs_free_space_key(leaf, header, &disk_key); | ||
64 | btrfs_disk_key_to_cpu(&location, &disk_key); | ||
65 | btrfs_release_path(path); | ||
66 | |||
67 | inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); | ||
68 | if (!inode) | ||
69 | return ERR_PTR(-ENOENT); | ||
70 | if (IS_ERR(inode)) | ||
71 | return inode; | ||
72 | if (is_bad_inode(inode)) { | ||
73 | iput(inode); | ||
74 | return ERR_PTR(-ENOENT); | ||
75 | } | ||
76 | |||
77 | inode->i_mapping->flags &= ~__GFP_FS; | ||
78 | |||
79 | return inode; | ||
80 | } | ||
81 | |||
82 | struct inode *lookup_free_space_inode(struct btrfs_root *root, | ||
83 | struct btrfs_block_group_cache | ||
84 | *block_group, struct btrfs_path *path) | ||
85 | { | ||
86 | struct inode *inode = NULL; | ||
87 | |||
88 | spin_lock(&block_group->lock); | ||
89 | if (block_group->inode) | ||
90 | inode = igrab(block_group->inode); | ||
91 | spin_unlock(&block_group->lock); | ||
92 | if (inode) | ||
93 | return inode; | ||
94 | |||
95 | inode = __lookup_free_space_inode(root, path, | ||
96 | block_group->key.objectid); | ||
97 | if (IS_ERR(inode)) | ||
98 | return inode; | ||
99 | |||
100 | spin_lock(&block_group->lock); | ||
101 | if (!btrfs_fs_closing(root->fs_info)) { | ||
102 | block_group->inode = igrab(inode); | ||
103 | block_group->iref = 1; | ||
104 | } | ||
105 | spin_unlock(&block_group->lock); | ||
106 | |||
107 | return inode; | ||
108 | } | ||
109 | |||
110 | int __create_free_space_inode(struct btrfs_root *root, | ||
111 | struct btrfs_trans_handle *trans, | ||
112 | struct btrfs_path *path, u64 ino, u64 offset) | ||
113 | { | ||
114 | struct btrfs_key key; | ||
115 | struct btrfs_disk_key disk_key; | ||
116 | struct btrfs_free_space_header *header; | ||
117 | struct btrfs_inode_item *inode_item; | ||
118 | struct extent_buffer *leaf; | ||
119 | int ret; | ||
120 | |||
121 | ret = btrfs_insert_empty_inode(trans, root, path, ino); | ||
122 | if (ret) | ||
123 | return ret; | ||
124 | |||
125 | leaf = path->nodes[0]; | ||
126 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | ||
127 | struct btrfs_inode_item); | ||
128 | btrfs_item_key(leaf, &disk_key, path->slots[0]); | ||
129 | memset_extent_buffer(leaf, 0, (unsigned long)inode_item, | ||
130 | sizeof(*inode_item)); | ||
131 | btrfs_set_inode_generation(leaf, inode_item, trans->transid); | ||
132 | btrfs_set_inode_size(leaf, inode_item, 0); | ||
133 | btrfs_set_inode_nbytes(leaf, inode_item, 0); | ||
134 | btrfs_set_inode_uid(leaf, inode_item, 0); | ||
135 | btrfs_set_inode_gid(leaf, inode_item, 0); | ||
136 | btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600); | ||
137 | btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS | | ||
138 | BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); | ||
139 | btrfs_set_inode_nlink(leaf, inode_item, 1); | ||
140 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); | ||
141 | btrfs_set_inode_block_group(leaf, inode_item, offset); | ||
142 | btrfs_mark_buffer_dirty(leaf); | ||
143 | btrfs_release_path(path); | ||
144 | |||
145 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | ||
146 | key.offset = offset; | ||
147 | key.type = 0; | ||
148 | |||
149 | ret = btrfs_insert_empty_item(trans, root, path, &key, | ||
150 | sizeof(struct btrfs_free_space_header)); | ||
151 | if (ret < 0) { | ||
152 | btrfs_release_path(path); | ||
153 | return ret; | ||
154 | } | ||
155 | leaf = path->nodes[0]; | ||
156 | header = btrfs_item_ptr(leaf, path->slots[0], | ||
157 | struct btrfs_free_space_header); | ||
158 | memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); | ||
159 | btrfs_set_free_space_key(leaf, header, &disk_key); | ||
160 | btrfs_mark_buffer_dirty(leaf); | ||
161 | btrfs_release_path(path); | ||
162 | |||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | int create_free_space_inode(struct btrfs_root *root, | ||
167 | struct btrfs_trans_handle *trans, | ||
168 | struct btrfs_block_group_cache *block_group, | ||
169 | struct btrfs_path *path) | ||
170 | { | ||
171 | int ret; | ||
172 | u64 ino; | ||
173 | |||
174 | ret = btrfs_find_free_objectid(root, &ino); | ||
175 | if (ret < 0) | ||
176 | return ret; | ||
177 | |||
178 | return __create_free_space_inode(root, trans, path, ino, | ||
179 | block_group->key.objectid); | ||
180 | } | ||
181 | |||
182 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, | ||
183 | struct btrfs_trans_handle *trans, | ||
184 | struct btrfs_path *path, | ||
185 | struct inode *inode) | ||
186 | { | ||
187 | loff_t oldsize; | ||
188 | int ret = 0; | ||
189 | |||
190 | trans->block_rsv = root->orphan_block_rsv; | ||
191 | ret = btrfs_block_rsv_check(trans, root, | ||
192 | root->orphan_block_rsv, | ||
193 | 0, 5); | ||
194 | if (ret) | ||
195 | return ret; | ||
196 | |||
197 | oldsize = i_size_read(inode); | ||
198 | btrfs_i_size_write(inode, 0); | ||
199 | truncate_pagecache(inode, oldsize, 0); | ||
200 | |||
201 | /* | ||
202 | * We don't need an orphan item because truncating the free space cache | ||
203 | * will never be split across transactions. | ||
204 | */ | ||
205 | ret = btrfs_truncate_inode_items(trans, root, inode, | ||
206 | 0, BTRFS_EXTENT_DATA_KEY); | ||
207 | if (ret) { | ||
208 | WARN_ON(1); | ||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | ret = btrfs_update_inode(trans, root, inode); | ||
213 | return ret; | ||
214 | } | ||
215 | |||
216 | static int readahead_cache(struct inode *inode) | ||
217 | { | ||
218 | struct file_ra_state *ra; | ||
219 | unsigned long last_index; | ||
220 | |||
221 | ra = kzalloc(sizeof(*ra), GFP_NOFS); | ||
222 | if (!ra) | ||
223 | return -ENOMEM; | ||
224 | |||
225 | file_ra_state_init(ra, inode->i_mapping); | ||
226 | last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; | ||
227 | |||
228 | page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); | ||
229 | |||
230 | kfree(ra); | ||
231 | |||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, | ||
236 | struct btrfs_free_space_ctl *ctl, | ||
237 | struct btrfs_path *path, u64 offset) | ||
238 | { | ||
239 | struct btrfs_free_space_header *header; | ||
240 | struct extent_buffer *leaf; | ||
241 | struct page *page; | ||
242 | u32 *checksums = NULL, *crc; | ||
243 | char *disk_crcs = NULL; | ||
244 | struct btrfs_key key; | ||
245 | struct list_head bitmaps; | ||
246 | u64 num_entries; | ||
247 | u64 num_bitmaps; | ||
248 | u64 generation; | ||
249 | u32 cur_crc = ~(u32)0; | ||
250 | pgoff_t index = 0; | ||
251 | unsigned long first_page_offset; | ||
252 | int num_checksums; | ||
253 | int ret = 0; | ||
254 | |||
255 | INIT_LIST_HEAD(&bitmaps); | ||
256 | |||
257 | /* Nothing in the space cache, goodbye */ | ||
258 | if (!i_size_read(inode)) | ||
259 | goto out; | ||
260 | |||
261 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | ||
262 | key.offset = offset; | ||
263 | key.type = 0; | ||
264 | |||
265 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
266 | if (ret < 0) | ||
267 | goto out; | ||
268 | else if (ret > 0) { | ||
269 | btrfs_release_path(path); | ||
270 | ret = 0; | ||
271 | goto out; | ||
272 | } | ||
273 | |||
274 | ret = -1; | ||
275 | |||
276 | leaf = path->nodes[0]; | ||
277 | header = btrfs_item_ptr(leaf, path->slots[0], | ||
278 | struct btrfs_free_space_header); | ||
279 | num_entries = btrfs_free_space_entries(leaf, header); | ||
280 | num_bitmaps = btrfs_free_space_bitmaps(leaf, header); | ||
281 | generation = btrfs_free_space_generation(leaf, header); | ||
282 | btrfs_release_path(path); | ||
283 | |||
284 | if (BTRFS_I(inode)->generation != generation) { | ||
285 | printk(KERN_ERR "btrfs: free space inode generation (%llu) did" | ||
286 | " not match free space cache generation (%llu)\n", | ||
287 | (unsigned long long)BTRFS_I(inode)->generation, | ||
288 | (unsigned long long)generation); | ||
289 | goto out; | ||
290 | } | ||
291 | |||
292 | if (!num_entries) | ||
293 | goto out; | ||
294 | |||
295 | /* Set up everything for doing checksumming */ | ||
296 | num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; | ||
297 | checksums = crc = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); | ||
298 | if (!checksums) | ||
299 | goto out; | ||
300 | first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); | ||
301 | disk_crcs = kzalloc(first_page_offset, GFP_NOFS); | ||
302 | if (!disk_crcs) | ||
303 | goto out; | ||
304 | |||
305 | ret = readahead_cache(inode); | ||
306 | if (ret) | ||
307 | goto out; | ||
308 | |||
309 | while (1) { | ||
310 | struct btrfs_free_space_entry *entry; | ||
311 | struct btrfs_free_space *e; | ||
312 | void *addr; | ||
313 | unsigned long offset = 0; | ||
314 | unsigned long start_offset = 0; | ||
315 | int need_loop = 0; | ||
316 | |||
317 | if (!num_entries && !num_bitmaps) | ||
318 | break; | ||
319 | |||
320 | if (index == 0) { | ||
321 | start_offset = first_page_offset; | ||
322 | offset = start_offset; | ||
323 | } | ||
324 | |||
325 | page = grab_cache_page(inode->i_mapping, index); | ||
326 | if (!page) | ||
327 | goto free_cache; | ||
328 | |||
329 | if (!PageUptodate(page)) { | ||
330 | btrfs_readpage(NULL, page); | ||
331 | lock_page(page); | ||
332 | if (!PageUptodate(page)) { | ||
333 | unlock_page(page); | ||
334 | page_cache_release(page); | ||
335 | printk(KERN_ERR "btrfs: error reading free " | ||
336 | "space cache\n"); | ||
337 | goto free_cache; | ||
338 | } | ||
339 | } | ||
340 | addr = kmap(page); | ||
341 | |||
342 | if (index == 0) { | ||
343 | u64 *gen; | ||
344 | |||
345 | memcpy(disk_crcs, addr, first_page_offset); | ||
346 | gen = addr + (sizeof(u32) * num_checksums); | ||
347 | if (*gen != BTRFS_I(inode)->generation) { | ||
348 | printk(KERN_ERR "btrfs: space cache generation" | ||
349 | " (%llu) does not match inode (%llu)\n", | ||
350 | (unsigned long long)*gen, | ||
351 | (unsigned long long) | ||
352 | BTRFS_I(inode)->generation); | ||
353 | kunmap(page); | ||
354 | unlock_page(page); | ||
355 | page_cache_release(page); | ||
356 | goto free_cache; | ||
357 | } | ||
358 | crc = (u32 *)disk_crcs; | ||
359 | } | ||
360 | entry = addr + start_offset; | ||
361 | |||
362 | /* First let's check our crc before we do anything fun */ | ||
363 | cur_crc = ~(u32)0; | ||
364 | cur_crc = btrfs_csum_data(root, addr + start_offset, cur_crc, | ||
365 | PAGE_CACHE_SIZE - start_offset); | ||
366 | btrfs_csum_final(cur_crc, (char *)&cur_crc); | ||
367 | if (cur_crc != *crc) { | ||
368 | printk(KERN_ERR "btrfs: crc mismatch for page %lu\n", | ||
369 | index); | ||
370 | kunmap(page); | ||
371 | unlock_page(page); | ||
372 | page_cache_release(page); | ||
373 | goto free_cache; | ||
374 | } | ||
375 | crc++; | ||
376 | |||
377 | while (1) { | ||
378 | if (!num_entries) | ||
379 | break; | ||
380 | |||
381 | need_loop = 1; | ||
382 | e = kmem_cache_zalloc(btrfs_free_space_cachep, | ||
383 | GFP_NOFS); | ||
384 | if (!e) { | ||
385 | kunmap(page); | ||
386 | unlock_page(page); | ||
387 | page_cache_release(page); | ||
388 | goto free_cache; | ||
389 | } | ||
390 | |||
391 | e->offset = le64_to_cpu(entry->offset); | ||
392 | e->bytes = le64_to_cpu(entry->bytes); | ||
393 | if (!e->bytes) { | ||
394 | kunmap(page); | ||
395 | kmem_cache_free(btrfs_free_space_cachep, e); | ||
396 | unlock_page(page); | ||
397 | page_cache_release(page); | ||
398 | goto free_cache; | ||
399 | } | ||
400 | |||
401 | if (entry->type == BTRFS_FREE_SPACE_EXTENT) { | ||
402 | spin_lock(&ctl->tree_lock); | ||
403 | ret = link_free_space(ctl, e); | ||
404 | spin_unlock(&ctl->tree_lock); | ||
405 | if (ret) { | ||
406 | printk(KERN_ERR "Duplicate entries in " | ||
407 | "free space cache, dumping\n"); | ||
408 | kunmap(page); | ||
409 | unlock_page(page); | ||
410 | page_cache_release(page); | ||
411 | goto free_cache; | ||
412 | } | ||
413 | } else { | ||
414 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | ||
415 | if (!e->bitmap) { | ||
416 | kunmap(page); | ||
417 | kmem_cache_free( | ||
418 | btrfs_free_space_cachep, e); | ||
419 | unlock_page(page); | ||
420 | page_cache_release(page); | ||
421 | goto free_cache; | ||
422 | } | ||
423 | spin_lock(&ctl->tree_lock); | ||
424 | ret = link_free_space(ctl, e); | ||
425 | ctl->total_bitmaps++; | ||
426 | ctl->op->recalc_thresholds(ctl); | ||
427 | spin_unlock(&ctl->tree_lock); | ||
428 | if (ret) { | ||
429 | printk(KERN_ERR "Duplicate entries in " | ||
430 | "free space cache, dumping\n"); | ||
431 | kunmap(page); | ||
432 | unlock_page(page); | ||
433 | page_cache_release(page); | ||
434 | goto free_cache; | ||
435 | } | ||
436 | list_add_tail(&e->list, &bitmaps); | ||
437 | } | ||
438 | |||
439 | num_entries--; | ||
440 | offset += sizeof(struct btrfs_free_space_entry); | ||
441 | if (offset + sizeof(struct btrfs_free_space_entry) >= | ||
442 | PAGE_CACHE_SIZE) | ||
443 | break; | ||
444 | entry++; | ||
445 | } | ||
446 | |||
447 | /* | ||
448 | * We read an entry out of this page, we need to move on to the | ||
449 | * next page. | ||
450 | */ | ||
451 | if (need_loop) { | ||
452 | kunmap(page); | ||
453 | goto next; | ||
454 | } | ||
455 | |||
456 | /* | ||
457 | * We add the bitmaps at the end of the entries, in the order that | ||
458 | * the bitmap entries were added to the cache. | ||
459 | */ | ||
460 | e = list_entry(bitmaps.next, struct btrfs_free_space, list); | ||
461 | list_del_init(&e->list); | ||
462 | memcpy(e->bitmap, addr, PAGE_CACHE_SIZE); | ||
463 | kunmap(page); | ||
464 | num_bitmaps--; | ||
465 | next: | ||
466 | unlock_page(page); | ||
467 | page_cache_release(page); | ||
468 | index++; | ||
469 | } | ||
470 | |||
471 | ret = 1; | ||
472 | out: | ||
473 | kfree(checksums); | ||
474 | kfree(disk_crcs); | ||
475 | return ret; | ||
476 | free_cache: | ||
477 | __btrfs_remove_free_space_cache(ctl); | ||
478 | goto out; | ||
479 | } | ||
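
The loader above relies on a simple layout for the cache file: page 0 starts with one u32 checksum per page of the file, followed by a u64 generation, and the free-space entries begin right after that header at first_page_offset; later pages hold only entries or whole bitmaps. The following standalone sketch just works out those offsets. The 4096-byte page and the 17-byte packed entry (u64 offset, u64 bytes, u8 type) are assumptions for illustration; the kernel uses PAGE_CACHE_SIZE and sizeof(struct btrfs_free_space_entry), and its loop breaks a little earlier when the next entry would not fully fit in a page.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_BYTES   4096ULL
#define ENTRY_SIZE_BYTES  17ULL   /* assumed: packed {u64, u64, u8} */

int main(void)
{
	uint64_t cache_size = 8 * PAGE_SIZE_BYTES;   /* example i_size of the cache inode */
	uint64_t num_pages = (cache_size + PAGE_SIZE_BYTES - 1) / PAGE_SIZE_BYTES;

	/* page 0: one u32 crc per page, then a u64 generation */
	uint64_t first_page_offset = sizeof(uint32_t) * num_pages + sizeof(uint64_t);

	uint64_t entries_page0 = (PAGE_SIZE_BYTES - first_page_offset) / ENTRY_SIZE_BYTES;
	uint64_t entries_other = PAGE_SIZE_BYTES / ENTRY_SIZE_BYTES;

	printf("pages=%llu, header=%llu bytes, ~%llu entries fit in page 0, ~%llu per later page\n",
	       (unsigned long long)num_pages,
	       (unsigned long long)first_page_offset,
	       (unsigned long long)entries_page0,
	       (unsigned long long)entries_other);
	return 0;
}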
480 | |||
481 | int load_free_space_cache(struct btrfs_fs_info *fs_info, | ||
482 | struct btrfs_block_group_cache *block_group) | ||
483 | { | ||
484 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
485 | struct btrfs_root *root = fs_info->tree_root; | ||
486 | struct inode *inode; | ||
487 | struct btrfs_path *path; | ||
488 | int ret; | ||
489 | bool matched; | ||
490 | u64 used = btrfs_block_group_used(&block_group->item); | ||
491 | |||
492 | /* | ||
493 | * If we're unmounting then just return, since this does a search on the | ||
494 | * normal root and not the commit root and we could deadlock. | ||
495 | */ | ||
496 | if (btrfs_fs_closing(fs_info)) | ||
497 | return 0; | ||
498 | |||
499 | /* | ||
500 | * If this block group has been marked to be cleared for one reason or | ||
501 | * another then we can't trust the on disk cache, so just return. | ||
502 | */ | ||
503 | spin_lock(&block_group->lock); | ||
504 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | ||
505 | spin_unlock(&block_group->lock); | ||
506 | return 0; | ||
507 | } | ||
508 | spin_unlock(&block_group->lock); | ||
509 | |||
510 | path = btrfs_alloc_path(); | ||
511 | if (!path) | ||
512 | return 0; | ||
513 | |||
514 | inode = lookup_free_space_inode(root, block_group, path); | ||
515 | if (IS_ERR(inode)) { | ||
516 | btrfs_free_path(path); | ||
517 | return 0; | ||
518 | } | ||
519 | |||
520 | ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, | ||
521 | path, block_group->key.objectid); | ||
522 | btrfs_free_path(path); | ||
523 | if (ret <= 0) | ||
524 | goto out; | ||
525 | |||
526 | spin_lock(&ctl->tree_lock); | ||
527 | matched = (ctl->free_space == (block_group->key.offset - used - | ||
528 | block_group->bytes_super)); | ||
529 | spin_unlock(&ctl->tree_lock); | ||
530 | |||
531 | if (!matched) { | ||
532 | __btrfs_remove_free_space_cache(ctl); | ||
533 | printk(KERN_ERR "block group %llu has an wrong amount of free " | ||
534 | "space\n", block_group->key.objectid); | ||
535 | ret = -1; | ||
536 | } | ||
537 | out: | ||
538 | if (ret < 0) { | ||
539 | /* This cache is bogus, make sure it gets cleared */ | ||
540 | spin_lock(&block_group->lock); | ||
541 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | ||
542 | spin_unlock(&block_group->lock); | ||
543 | ret = 0; | ||
544 | |||
545 | printk(KERN_ERR "btrfs: failed to load free space cache " | ||
546 | "for block group %llu\n", block_group->key.objectid); | ||
547 | } | ||
548 | |||
549 | iput(inode); | ||
550 | return ret; | ||
551 | } | ||
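
load_free_space_cache() only trusts the on-disk cache when the total free space it loaded matches what the block group item implies; otherwise it dumps the in-memory cache and forces a rebuild by marking the group BTRFS_DC_CLEAR. The consistency check restated as plain arithmetic, with made-up example numbers rather than values from a real filesystem:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bg_length   = 1024ULL * 1024 * 1024;  /* block_group->key.offset */
	uint64_t used        = 700ULL * 1024 * 1024;   /* btrfs_block_group_used() */
	uint64_t bytes_super = 2ULL * 1024 * 1024;     /* superblock mirrors in this group */
	uint64_t loaded_free = 322ULL * 1024 * 1024;   /* ctl->free_space after the load */

	bool matched = loaded_free == bg_length - used - bytes_super;
	printf("cache %s\n", matched ? "accepted" : "rejected, will be rebuilt");
	return 0;
}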
552 | |||
553 | int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | ||
554 | struct btrfs_free_space_ctl *ctl, | ||
555 | struct btrfs_block_group_cache *block_group, | ||
556 | struct btrfs_trans_handle *trans, | ||
557 | struct btrfs_path *path, u64 offset) | ||
558 | { | ||
559 | struct btrfs_free_space_header *header; | ||
560 | struct extent_buffer *leaf; | ||
561 | struct rb_node *node; | ||
562 | struct list_head *pos, *n; | ||
563 | struct page **pages; | ||
564 | struct page *page; | ||
565 | struct extent_state *cached_state = NULL; | ||
566 | struct btrfs_free_cluster *cluster = NULL; | ||
567 | struct extent_io_tree *unpin = NULL; | ||
568 | struct list_head bitmap_list; | ||
569 | struct btrfs_key key; | ||
570 | u64 start, end, len; | ||
571 | u64 bytes = 0; | ||
572 | u32 *crc, *checksums; | ||
573 | unsigned long first_page_offset; | ||
574 | int index = 0, num_pages = 0; | ||
575 | int entries = 0; | ||
576 | int bitmaps = 0; | ||
577 | int ret = -1; | ||
578 | bool next_page = false; | ||
579 | bool out_of_space = false; | ||
580 | |||
581 | INIT_LIST_HEAD(&bitmap_list); | ||
582 | |||
583 | node = rb_first(&ctl->free_space_offset); | ||
584 | if (!node) | ||
585 | return 0; | ||
586 | |||
587 | if (!i_size_read(inode)) | ||
588 | return -1; | ||
589 | |||
590 | num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> | ||
591 | PAGE_CACHE_SHIFT; | ||
592 | |||
593 | /* Since the first page has all of our checksums and our generation we | ||
594 | * need to calculate the offset into the page that we can start writing | ||
595 | * our entries. | ||
596 | */ | ||
597 | first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); | ||
598 | |||
599 | filemap_write_and_wait(inode->i_mapping); | ||
600 | btrfs_wait_ordered_range(inode, inode->i_size & | ||
601 | ~(root->sectorsize - 1), (u64)-1); | ||
602 | |||
603 | /* make sure we don't overflow that first page */ | ||
604 | if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) { | ||
605 | /* this is really the same as running out of space, where we also return 0 */ | ||
606 | printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n"); | ||
607 | ret = 0; | ||
608 | goto out_update; | ||
609 | } | ||
610 | |||
611 | /* We need a checksum per page. */ | ||
612 | crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); | ||
613 | if (!crc) | ||
614 | return -1; | ||
615 | |||
616 | pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); | ||
617 | if (!pages) { | ||
618 | kfree(crc); | ||
619 | return -1; | ||
620 | } | ||
621 | |||
622 | /* Get the cluster for this block_group if it exists */ | ||
623 | if (block_group && !list_empty(&block_group->cluster_list)) | ||
624 | cluster = list_entry(block_group->cluster_list.next, | ||
625 | struct btrfs_free_cluster, | ||
626 | block_group_list); | ||
627 | |||
628 | /* | ||
629 | * We shouldn't have switched the pinned extents yet so this is the | ||
630 | * right one | ||
631 | */ | ||
632 | unpin = root->fs_info->pinned_extents; | ||
633 | |||
634 | /* | ||
635 | * Lock all pages first so we can lock the extent safely. | ||
636 | * | ||
637 | * NOTE: Because we hold the ref the entire time we're going to write to | ||
638 | * the page, find_get_page should never fail, so we don't do a check | ||
639 | * after find_get_page at this point. Just putting this here so people | ||
640 | * know and don't freak out. | ||
641 | */ | ||
642 | while (index < num_pages) { | ||
643 | page = grab_cache_page(inode->i_mapping, index); | ||
644 | if (!page) { | ||
645 | int i; | ||
646 | |||
647 | for (i = 0; i < num_pages; i++) { | ||
648 | unlock_page(pages[i]); | ||
649 | page_cache_release(pages[i]); | ||
650 | } | ||
651 | goto out_free; | ||
652 | } | ||
653 | pages[index] = page; | ||
654 | index++; | ||
655 | } | ||
656 | |||
657 | index = 0; | ||
658 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | ||
659 | 0, &cached_state, GFP_NOFS); | ||
660 | |||
661 | /* | ||
662 | * When searching for pinned extents, we need to start at our start | ||
663 | * offset. | ||
664 | */ | ||
665 | if (block_group) | ||
666 | start = block_group->key.objectid; | ||
667 | |||
668 | /* Write out the extent entries */ | ||
669 | do { | ||
670 | struct btrfs_free_space_entry *entry; | ||
671 | void *addr; | ||
672 | unsigned long offset = 0; | ||
673 | unsigned long start_offset = 0; | ||
674 | |||
675 | next_page = false; | ||
676 | |||
677 | if (index == 0) { | ||
678 | start_offset = first_page_offset; | ||
679 | offset = start_offset; | ||
680 | } | ||
681 | |||
682 | if (index >= num_pages) { | ||
683 | out_of_space = true; | ||
684 | break; | ||
685 | } | ||
686 | |||
687 | page = pages[index]; | ||
688 | |||
689 | addr = kmap(page); | ||
690 | entry = addr + start_offset; | ||
691 | |||
692 | memset(addr, 0, PAGE_CACHE_SIZE); | ||
693 | while (node && !next_page) { | ||
694 | struct btrfs_free_space *e; | ||
695 | |||
696 | e = rb_entry(node, struct btrfs_free_space, offset_index); | ||
697 | entries++; | ||
698 | |||
699 | entry->offset = cpu_to_le64(e->offset); | ||
700 | entry->bytes = cpu_to_le64(e->bytes); | ||
701 | if (e->bitmap) { | ||
702 | entry->type = BTRFS_FREE_SPACE_BITMAP; | ||
703 | list_add_tail(&e->list, &bitmap_list); | ||
704 | bitmaps++; | ||
705 | } else { | ||
706 | entry->type = BTRFS_FREE_SPACE_EXTENT; | ||
707 | } | ||
708 | node = rb_next(node); | ||
709 | if (!node && cluster) { | ||
710 | node = rb_first(&cluster->root); | ||
711 | cluster = NULL; | ||
712 | } | ||
713 | offset += sizeof(struct btrfs_free_space_entry); | ||
714 | if (offset + sizeof(struct btrfs_free_space_entry) >= | ||
715 | PAGE_CACHE_SIZE) | ||
716 | next_page = true; | ||
717 | entry++; | ||
718 | } | ||
719 | |||
720 | /* | ||
721 | * We want to add any pinned extents to our free space cache | ||
722 | * so we don't leak the space | ||
723 | */ | ||
724 | while (block_group && !next_page && | ||
725 | (start < block_group->key.objectid + | ||
726 | block_group->key.offset)) { | ||
727 | ret = find_first_extent_bit(unpin, start, &start, &end, | ||
728 | EXTENT_DIRTY); | ||
729 | if (ret) { | ||
730 | ret = 0; | ||
731 | break; | ||
732 | } | ||
733 | |||
734 | /* This pinned extent is out of our range */ | ||
735 | if (start >= block_group->key.objectid + | ||
736 | block_group->key.offset) | ||
737 | break; | ||
738 | |||
739 | len = block_group->key.objectid + | ||
740 | block_group->key.offset - start; | ||
741 | len = min(len, end + 1 - start); | ||
742 | |||
743 | entries++; | ||
744 | entry->offset = cpu_to_le64(start); | ||
745 | entry->bytes = cpu_to_le64(len); | ||
746 | entry->type = BTRFS_FREE_SPACE_EXTENT; | ||
747 | |||
748 | start = end + 1; | ||
749 | offset += sizeof(struct btrfs_free_space_entry); | ||
750 | if (offset + sizeof(struct btrfs_free_space_entry) >= | ||
751 | PAGE_CACHE_SIZE) | ||
752 | next_page = true; | ||
753 | entry++; | ||
754 | } | ||
755 | *crc = ~(u32)0; | ||
756 | *crc = btrfs_csum_data(root, addr + start_offset, *crc, | ||
757 | PAGE_CACHE_SIZE - start_offset); | ||
758 | kunmap(page); | ||
759 | |||
760 | btrfs_csum_final(*crc, (char *)crc); | ||
761 | crc++; | ||
762 | |||
763 | bytes += PAGE_CACHE_SIZE; | ||
764 | |||
765 | index++; | ||
766 | } while (node || next_page); | ||
767 | |||
768 | /* Write out the bitmaps */ | ||
769 | list_for_each_safe(pos, n, &bitmap_list) { | ||
770 | void *addr; | ||
771 | struct btrfs_free_space *entry = | ||
772 | list_entry(pos, struct btrfs_free_space, list); | ||
773 | |||
774 | if (index >= num_pages) { | ||
775 | out_of_space = true; | ||
776 | break; | ||
777 | } | ||
778 | page = pages[index]; | ||
779 | |||
780 | addr = kmap(page); | ||
781 | memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); | ||
782 | *crc = ~(u32)0; | ||
783 | *crc = btrfs_csum_data(root, addr, *crc, PAGE_CACHE_SIZE); | ||
784 | kunmap(page); | ||
785 | btrfs_csum_final(*crc, (char *)crc); | ||
786 | crc++; | ||
787 | bytes += PAGE_CACHE_SIZE; | ||
788 | |||
789 | list_del_init(&entry->list); | ||
790 | index++; | ||
791 | } | ||
792 | |||
793 | if (out_of_space) { | ||
794 | btrfs_drop_pages(pages, num_pages); | ||
795 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | ||
796 | i_size_read(inode) - 1, &cached_state, | ||
797 | GFP_NOFS); | ||
798 | ret = 0; | ||
799 | goto out_free; | ||
800 | } | ||
801 | |||
802 | /* Zero out the rest of the pages just to make sure */ | ||
803 | while (index < num_pages) { | ||
804 | void *addr; | ||
805 | |||
806 | page = pages[index]; | ||
807 | addr = kmap(page); | ||
808 | memset(addr, 0, PAGE_CACHE_SIZE); | ||
809 | kunmap(page); | ||
810 | bytes += PAGE_CACHE_SIZE; | ||
811 | index++; | ||
812 | } | ||
813 | |||
814 | /* Write the checksums and trans id to the first page */ | ||
815 | { | ||
816 | void *addr; | ||
817 | u64 *gen; | ||
818 | |||
819 | page = pages[0]; | ||
820 | |||
821 | addr = kmap(page); | ||
822 | memcpy(addr, checksums, sizeof(u32) * num_pages); | ||
823 | gen = addr + (sizeof(u32) * num_pages); | ||
824 | *gen = trans->transid; | ||
825 | kunmap(page); | ||
826 | } | ||
827 | |||
828 | ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, | ||
829 | bytes, &cached_state); | ||
830 | btrfs_drop_pages(pages, num_pages); | ||
831 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, | ||
832 | i_size_read(inode) - 1, &cached_state, GFP_NOFS); | ||
833 | |||
834 | if (ret) { | ||
835 | ret = 0; | ||
836 | goto out_free; | ||
837 | } | ||
838 | |||
839 | BTRFS_I(inode)->generation = trans->transid; | ||
840 | |||
841 | filemap_write_and_wait(inode->i_mapping); | ||
842 | |||
843 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | ||
844 | key.offset = offset; | ||
845 | key.type = 0; | ||
846 | |||
847 | ret = btrfs_search_slot(trans, root, &key, path, 1, 1); | ||
848 | if (ret < 0) { | ||
849 | ret = -1; | ||
850 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, | ||
851 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
852 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); | ||
853 | goto out_free; | ||
854 | } | ||
855 | leaf = path->nodes[0]; | ||
856 | if (ret > 0) { | ||
857 | struct btrfs_key found_key; | ||
858 | BUG_ON(!path->slots[0]); | ||
859 | path->slots[0]--; | ||
860 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | ||
861 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || | ||
862 | found_key.offset != offset) { | ||
863 | ret = -1; | ||
864 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, | ||
865 | EXTENT_DIRTY | EXTENT_DELALLOC | | ||
866 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, | ||
867 | GFP_NOFS); | ||
868 | btrfs_release_path(path); | ||
869 | goto out_free; | ||
870 | } | ||
871 | } | ||
872 | header = btrfs_item_ptr(leaf, path->slots[0], | ||
873 | struct btrfs_free_space_header); | ||
874 | btrfs_set_free_space_entries(leaf, header, entries); | ||
875 | btrfs_set_free_space_bitmaps(leaf, header, bitmaps); | ||
876 | btrfs_set_free_space_generation(leaf, header, trans->transid); | ||
877 | btrfs_mark_buffer_dirty(leaf); | ||
878 | btrfs_release_path(path); | ||
879 | |||
880 | ret = 1; | ||
881 | |||
882 | out_free: | ||
883 | kfree(checksums); | ||
884 | kfree(pages); | ||
885 | |||
886 | out_update: | ||
887 | if (ret != 1) { | ||
888 | invalidate_inode_pages2_range(inode->i_mapping, 0, index); | ||
889 | BTRFS_I(inode)->generation = 0; | ||
890 | } | ||
891 | btrfs_update_inode(trans, root, inode); | ||
892 | return ret; | ||
893 | } | ||
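
The write path above checksums each page's payload separately, skipping the checksum/generation header region on page 0, and then stores the whole crc array plus trans->transid at the front of page 0. A rough user-space illustration of that ordering follows; the plain bitwise CRC-32C stands in for the kernel's btrfs_csum_data()/btrfs_csum_final() helpers and is an assumption of the sketch, not the kernel routine.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096u

/* plain bitwise CRC-32C (Castagnoli); stand-in for btrfs_csum_data() */
static uint32_t crc32c(uint32_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t cache[3 * PAGE_SZ] = { 0 };           /* pretend cache file contents */
	unsigned num_pages = 3;
	size_t hdr = sizeof(uint32_t) * num_pages + sizeof(uint64_t);
	uint32_t crcs[3];

	for (unsigned i = 0; i < num_pages; i++) {
		size_t skip = (i == 0) ? hdr : 0;     /* page 0 payload starts after the header */
		crcs[i] = ~crc32c(~0u, cache + i * PAGE_SZ + skip, PAGE_SZ - skip);
	}

	/* the checksums and the generation/transid land at the front of page 0 */
	memcpy(cache, crcs, sizeof(crcs));
	uint64_t gen = 42;                            /* placeholder for trans->transid */
	memcpy(cache + sizeof(crcs), &gen, sizeof(gen));

	printf("page0 crc=0x%08x\n", crcs[0]);
	return 0;
}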
894 | |||
895 | int btrfs_write_out_cache(struct btrfs_root *root, | ||
896 | struct btrfs_trans_handle *trans, | ||
897 | struct btrfs_block_group_cache *block_group, | ||
898 | struct btrfs_path *path) | ||
899 | { | ||
900 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
901 | struct inode *inode; | ||
902 | int ret = 0; | ||
903 | |||
904 | root = root->fs_info->tree_root; | ||
905 | |||
906 | spin_lock(&block_group->lock); | ||
907 | if (block_group->disk_cache_state < BTRFS_DC_SETUP) { | ||
908 | spin_unlock(&block_group->lock); | ||
909 | return 0; | ||
910 | } | ||
911 | spin_unlock(&block_group->lock); | ||
912 | |||
913 | inode = lookup_free_space_inode(root, block_group, path); | ||
914 | if (IS_ERR(inode)) | ||
915 | return 0; | ||
916 | |||
917 | ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, | ||
918 | path, block_group->key.objectid); | ||
919 | if (ret < 0) { | ||
920 | spin_lock(&block_group->lock); | ||
921 | block_group->disk_cache_state = BTRFS_DC_ERROR; | ||
922 | spin_unlock(&block_group->lock); | ||
923 | ret = 0; | ||
924 | |||
925 | printk(KERN_ERR "btrfs: failed to write free space cache " | ||
926 | "for block group %llu\n", block_group->key.objectid); | ||
927 | } | ||
928 | |||
929 | iput(inode); | ||
930 | return ret; | ||
931 | } | ||
932 | |||
933 | static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, | ||
31 | u64 offset) | 934 | u64 offset) |
32 | { | 935 | { |
33 | BUG_ON(offset < bitmap_start); | 936 | BUG_ON(offset < bitmap_start); |
34 | offset -= bitmap_start; | 937 | offset -= bitmap_start; |
35 | return (unsigned long)(div64_u64(offset, sectorsize)); | 938 | return (unsigned long)(div_u64(offset, unit)); |
36 | } | 939 | } |
37 | 940 | ||
38 | static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) | 941 | static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) |
39 | { | 942 | { |
40 | return (unsigned long)(div64_u64(bytes, sectorsize)); | 943 | return (unsigned long)(div_u64(bytes, unit)); |
41 | } | 944 | } |
42 | 945 | ||
43 | static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, | 946 | static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, |
44 | u64 offset) | 947 | u64 offset) |
45 | { | 948 | { |
46 | u64 bitmap_start; | 949 | u64 bitmap_start; |
47 | u64 bytes_per_bitmap; | 950 | u64 bytes_per_bitmap; |
48 | 951 | ||
49 | bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; | 952 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; |
50 | bitmap_start = offset - block_group->key.objectid; | 953 | bitmap_start = offset - ctl->start; |
51 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); | 954 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); |
52 | bitmap_start *= bytes_per_bitmap; | 955 | bitmap_start *= bytes_per_bitmap; |
53 | bitmap_start += block_group->key.objectid; | 956 | bitmap_start += ctl->start; |
54 | 957 | ||
55 | return bitmap_start; | 958 | return bitmap_start; |
56 | } | 959 | } |
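
With the rework above, the bitmap helpers are expressed in terms of ctl->unit and ctl->start instead of reaching into the block group for sectorsize and key.objectid. A small standalone sketch of the same arithmetic; the 4096-byte unit and BITS_PER_BITMAP value are assumed here to match the constants in the diff (PAGE_CACHE_SIZE * 8):

#include <stdint.h>
#include <stdio.h>

#define UNIT             4096ULL          /* assumed ctl->unit (sectorsize) */
#define BITS_PER_BITMAP  (4096ULL * 8)    /* PAGE_CACHE_SIZE * 8 */

/* round an offset down to the start of the bitmap window that covers it */
static uint64_t offset_to_bitmap(uint64_t start, uint64_t offset)
{
	uint64_t bytes_per_bitmap = BITS_PER_BITMAP * UNIT;
	return start + (offset - start) / bytes_per_bitmap * bytes_per_bitmap;
}

/* bit index of an offset within its bitmap */
static unsigned long offset_to_bit(uint64_t bitmap_start, uint64_t offset)
{
	return (unsigned long)((offset - bitmap_start) / UNIT);
}

int main(void)
{
	uint64_t start = 1024ULL * 1024 * 1024;        /* example ctl->start */
	uint64_t offset = start + 300ULL * 1024 * 1024;
	uint64_t bm = offset_to_bitmap(start, offset);

	printf("bitmap starts at %llu, offset maps to bit %lu of %llu\n",
	       (unsigned long long)bm, offset_to_bit(bm, offset),
	       (unsigned long long)BITS_PER_BITMAP);
	return 0;
}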
@@ -85,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, | |||
85 | * logically. | 988 | * logically. |
86 | */ | 989 | */ |
87 | if (bitmap) { | 990 | if (bitmap) { |
88 | WARN_ON(info->bitmap); | 991 | if (info->bitmap) { |
992 | WARN_ON_ONCE(1); | ||
993 | return -EEXIST; | ||
994 | } | ||
89 | p = &(*p)->rb_right; | 995 | p = &(*p)->rb_right; |
90 | } else { | 996 | } else { |
91 | WARN_ON(!info->bitmap); | 997 | if (!info->bitmap) { |
998 | WARN_ON_ONCE(1); | ||
999 | return -EEXIST; | ||
1000 | } | ||
92 | p = &(*p)->rb_left; | 1001 | p = &(*p)->rb_left; |
93 | } | 1002 | } |
94 | } | 1003 | } |
@@ -108,10 +1017,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, | |||
108 | * offset. | 1017 | * offset. |
109 | */ | 1018 | */ |
110 | static struct btrfs_free_space * | 1019 | static struct btrfs_free_space * |
111 | tree_search_offset(struct btrfs_block_group_cache *block_group, | 1020 | tree_search_offset(struct btrfs_free_space_ctl *ctl, |
112 | u64 offset, int bitmap_only, int fuzzy) | 1021 | u64 offset, int bitmap_only, int fuzzy) |
113 | { | 1022 | { |
114 | struct rb_node *n = block_group->free_space_offset.rb_node; | 1023 | struct rb_node *n = ctl->free_space_offset.rb_node; |
115 | struct btrfs_free_space *entry, *prev = NULL; | 1024 | struct btrfs_free_space *entry, *prev = NULL; |
116 | 1025 | ||
117 | /* find entry that is closest to the 'offset' */ | 1026 | /* find entry that is closest to the 'offset' */ |
@@ -207,8 +1116,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
207 | break; | 1116 | break; |
208 | } | 1117 | } |
209 | } | 1118 | } |
210 | if (entry->offset + BITS_PER_BITMAP * | 1119 | if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) |
211 | block_group->sectorsize > offset) | ||
212 | return entry; | 1120 | return entry; |
213 | } else if (entry->offset + entry->bytes > offset) | 1121 | } else if (entry->offset + entry->bytes > offset) |
214 | return entry; | 1122 | return entry; |
@@ -219,7 +1127,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
219 | while (1) { | 1127 | while (1) { |
220 | if (entry->bitmap) { | 1128 | if (entry->bitmap) { |
221 | if (entry->offset + BITS_PER_BITMAP * | 1129 | if (entry->offset + BITS_PER_BITMAP * |
222 | block_group->sectorsize > offset) | 1130 | ctl->unit > offset) |
223 | break; | 1131 | break; |
224 | } else { | 1132 | } else { |
225 | if (entry->offset + entry->bytes > offset) | 1133 | if (entry->offset + entry->bytes > offset) |
@@ -234,53 +1142,69 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
234 | return entry; | 1142 | return entry; |
235 | } | 1143 | } |
236 | 1144 | ||
237 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, | 1145 | static inline void |
1146 | __unlink_free_space(struct btrfs_free_space_ctl *ctl, | ||
1147 | struct btrfs_free_space *info) | ||
1148 | { | ||
1149 | rb_erase(&info->offset_index, &ctl->free_space_offset); | ||
1150 | ctl->free_extents--; | ||
1151 | } | ||
1152 | |||
1153 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, | ||
238 | struct btrfs_free_space *info) | 1154 | struct btrfs_free_space *info) |
239 | { | 1155 | { |
240 | rb_erase(&info->offset_index, &block_group->free_space_offset); | 1156 | __unlink_free_space(ctl, info); |
241 | block_group->free_extents--; | 1157 | ctl->free_space -= info->bytes; |
242 | block_group->free_space -= info->bytes; | ||
243 | } | 1158 | } |
244 | 1159 | ||
245 | static int link_free_space(struct btrfs_block_group_cache *block_group, | 1160 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
246 | struct btrfs_free_space *info) | 1161 | struct btrfs_free_space *info) |
247 | { | 1162 | { |
248 | int ret = 0; | 1163 | int ret = 0; |
249 | 1164 | ||
250 | BUG_ON(!info->bitmap && !info->bytes); | 1165 | BUG_ON(!info->bitmap && !info->bytes); |
251 | ret = tree_insert_offset(&block_group->free_space_offset, info->offset, | 1166 | ret = tree_insert_offset(&ctl->free_space_offset, info->offset, |
252 | &info->offset_index, (info->bitmap != NULL)); | 1167 | &info->offset_index, (info->bitmap != NULL)); |
253 | if (ret) | 1168 | if (ret) |
254 | return ret; | 1169 | return ret; |
255 | 1170 | ||
256 | block_group->free_space += info->bytes; | 1171 | ctl->free_space += info->bytes; |
257 | block_group->free_extents++; | 1172 | ctl->free_extents++; |
258 | return ret; | 1173 | return ret; |
259 | } | 1174 | } |
260 | 1175 | ||
261 | static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | 1176 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) |
262 | { | 1177 | { |
1178 | struct btrfs_block_group_cache *block_group = ctl->private; | ||
263 | u64 max_bytes; | 1179 | u64 max_bytes; |
264 | u64 bitmap_bytes; | 1180 | u64 bitmap_bytes; |
265 | u64 extent_bytes; | 1181 | u64 extent_bytes; |
1182 | u64 size = block_group->key.offset; | ||
1183 | u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; | ||
1184 | int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); | ||
1185 | |||
1186 | BUG_ON(ctl->total_bitmaps > max_bitmaps); | ||
266 | 1187 | ||
267 | /* | 1188 | /* |
268 | * The goal is to keep the total amount of memory used per 1gb of space | 1189 | * The goal is to keep the total amount of memory used per 1gb of space |
269 | * at or below 32k, so we need to adjust how much memory we allow to be | 1190 | * at or below 32k, so we need to adjust how much memory we allow to be |
270 | * used by extent based free space tracking | 1191 | * used by extent based free space tracking |
271 | */ | 1192 | */ |
272 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | 1193 | if (size < 1024 * 1024 * 1024) |
273 | (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); | 1194 | max_bytes = MAX_CACHE_BYTES_PER_GIG; |
1195 | else | ||
1196 | max_bytes = MAX_CACHE_BYTES_PER_GIG * | ||
1197 | div64_u64(size, 1024 * 1024 * 1024); | ||
274 | 1198 | ||
275 | /* | 1199 | /* |
276 | * we want to account for 1 more bitmap than what we have so we can make | 1200 | * we want to account for 1 more bitmap than what we have so we can make |
277 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as | 1201 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as |
278 | * we add more bitmaps. | 1202 | * we add more bitmaps. |
279 | */ | 1203 | */ |
280 | bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; | 1204 | bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; |
281 | 1205 | ||
282 | if (bitmap_bytes >= max_bytes) { | 1206 | if (bitmap_bytes >= max_bytes) { |
283 | block_group->extents_thresh = 0; | 1207 | ctl->extents_thresh = 0; |
284 | return; | 1208 | return; |
285 | } | 1209 | } |
286 | 1210 | ||
@@ -291,47 +1215,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | |||
291 | extent_bytes = max_bytes - bitmap_bytes; | 1215 | extent_bytes = max_bytes - bitmap_bytes; |
292 | extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); | 1216 | extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); |
293 | 1217 | ||
294 | block_group->extents_thresh = | 1218 | ctl->extents_thresh = |
295 | div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); | 1219 | div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); |
296 | } | 1220 | } |
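
The reworked recalculate_thresholds() keeps the per-GiB memory budget but no longer rounds a sub-GiB block group's budget down to zero: any group smaller than 1 GiB now gets the full MAX_CACHE_BYTES_PER_GIG. A worked example of the threshold arithmetic under the constants from the diff (32 KiB per GiB, 4 KiB pages); the 48-byte sizeof(struct btrfs_free_space) is purely an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define MAX_CACHE_BYTES_PER_GIG  (32ULL * 1024)
#define PAGE_SZ                  4096ULL
#define GIG                      (1024ULL * 1024 * 1024)
#define FREE_SPACE_ENTRY_SZ      48ULL   /* assumed sizeof(struct btrfs_free_space) */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t size = GIG;            /* example block group size */
	uint64_t total_bitmaps = 1;     /* bitmaps already in use */

	uint64_t max_bytes = size < GIG ? MAX_CACHE_BYTES_PER_GIG
					: MAX_CACHE_BYTES_PER_GIG * (size / GIG);

	/* reserve room for one more bitmap than we currently have */
	uint64_t bitmap_bytes = (total_bitmaps + 1) * PAGE_SZ;
	if (bitmap_bytes >= max_bytes) {
		printf("extents_thresh = 0\n");
		return 0;
	}

	uint64_t extent_bytes = min_u64(max_bytes - bitmap_bytes, max_bytes / 2);
	printf("extents_thresh = %llu extents\n",
	       (unsigned long long)(extent_bytes / FREE_SPACE_ENTRY_SZ));
	return 0;
}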
297 | 1221 | ||
298 | static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, | 1222 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
299 | struct btrfs_free_space *info, u64 offset, | 1223 | struct btrfs_free_space *info, u64 offset, |
300 | u64 bytes) | 1224 | u64 bytes) |
301 | { | 1225 | { |
302 | unsigned long start, end; | 1226 | unsigned long start, count; |
303 | unsigned long i; | ||
304 | 1227 | ||
305 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | 1228 | start = offset_to_bit(info->offset, ctl->unit, offset); |
306 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | 1229 | count = bytes_to_bits(bytes, ctl->unit); |
307 | BUG_ON(end > BITS_PER_BITMAP); | 1230 | BUG_ON(start + count > BITS_PER_BITMAP); |
308 | 1231 | ||
309 | for (i = start; i < end; i++) | 1232 | bitmap_clear(info->bitmap, start, count); |
310 | clear_bit(i, info->bitmap); | ||
311 | 1233 | ||
312 | info->bytes -= bytes; | 1234 | info->bytes -= bytes; |
313 | block_group->free_space -= bytes; | 1235 | ctl->free_space -= bytes; |
314 | } | 1236 | } |
315 | 1237 | ||
316 | static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, | 1238 | static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, |
317 | struct btrfs_free_space *info, u64 offset, | 1239 | struct btrfs_free_space *info, u64 offset, |
318 | u64 bytes) | 1240 | u64 bytes) |
319 | { | 1241 | { |
320 | unsigned long start, end; | 1242 | unsigned long start, count; |
321 | unsigned long i; | ||
322 | 1243 | ||
323 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | 1244 | start = offset_to_bit(info->offset, ctl->unit, offset); |
324 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | 1245 | count = bytes_to_bits(bytes, ctl->unit); |
325 | BUG_ON(end > BITS_PER_BITMAP); | 1246 | BUG_ON(start + count > BITS_PER_BITMAP); |
326 | 1247 | ||
327 | for (i = start; i < end; i++) | 1248 | bitmap_set(info->bitmap, start, count); |
328 | set_bit(i, info->bitmap); | ||
329 | 1249 | ||
330 | info->bytes += bytes; | 1250 | info->bytes += bytes; |
331 | block_group->free_space += bytes; | 1251 | ctl->free_space += bytes; |
332 | } | 1252 | } |
333 | 1253 | ||
334 | static int search_bitmap(struct btrfs_block_group_cache *block_group, | 1254 | static int search_bitmap(struct btrfs_free_space_ctl *ctl, |
335 | struct btrfs_free_space *bitmap_info, u64 *offset, | 1255 | struct btrfs_free_space *bitmap_info, u64 *offset, |
336 | u64 *bytes) | 1256 | u64 *bytes) |
337 | { | 1257 | { |
@@ -339,9 +1259,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, | |||
339 | unsigned long bits, i; | 1259 | unsigned long bits, i; |
340 | unsigned long next_zero; | 1260 | unsigned long next_zero; |
341 | 1261 | ||
342 | i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, | 1262 | i = offset_to_bit(bitmap_info->offset, ctl->unit, |
343 | max_t(u64, *offset, bitmap_info->offset)); | 1263 | max_t(u64, *offset, bitmap_info->offset)); |
344 | bits = bytes_to_bits(*bytes, block_group->sectorsize); | 1264 | bits = bytes_to_bits(*bytes, ctl->unit); |
345 | 1265 | ||
346 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); | 1266 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); |
347 | i < BITS_PER_BITMAP; | 1267 | i < BITS_PER_BITMAP; |
@@ -356,29 +1276,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, | |||
356 | } | 1276 | } |
357 | 1277 | ||
358 | if (found_bits) { | 1278 | if (found_bits) { |
359 | *offset = (u64)(i * block_group->sectorsize) + | 1279 | *offset = (u64)(i * ctl->unit) + bitmap_info->offset; |
360 | bitmap_info->offset; | 1280 | *bytes = (u64)(found_bits) * ctl->unit; |
361 | *bytes = (u64)(found_bits) * block_group->sectorsize; | ||
362 | return 0; | 1281 | return 0; |
363 | } | 1282 | } |
364 | 1283 | ||
365 | return -1; | 1284 | return -1; |
366 | } | 1285 | } |
367 | 1286 | ||
368 | static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | 1287 | static struct btrfs_free_space * |
369 | *block_group, u64 *offset, | 1288 | find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) |
370 | u64 *bytes, int debug) | ||
371 | { | 1289 | { |
372 | struct btrfs_free_space *entry; | 1290 | struct btrfs_free_space *entry; |
373 | struct rb_node *node; | 1291 | struct rb_node *node; |
374 | int ret; | 1292 | int ret; |
375 | 1293 | ||
376 | if (!block_group->free_space_offset.rb_node) | 1294 | if (!ctl->free_space_offset.rb_node) |
377 | return NULL; | 1295 | return NULL; |
378 | 1296 | ||
379 | entry = tree_search_offset(block_group, | 1297 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); |
380 | offset_to_bitmap(block_group, *offset), | ||
381 | 0, 1); | ||
382 | if (!entry) | 1298 | if (!entry) |
383 | return NULL; | 1299 | return NULL; |
384 | 1300 | ||
@@ -388,7 +1304,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | |||
388 | continue; | 1304 | continue; |
389 | 1305 | ||
390 | if (entry->bitmap) { | 1306 | if (entry->bitmap) { |
391 | ret = search_bitmap(block_group, entry, offset, bytes); | 1307 | ret = search_bitmap(ctl, entry, offset, bytes); |
392 | if (!ret) | 1308 | if (!ret) |
393 | return entry; | 1309 | return entry; |
394 | continue; | 1310 | continue; |
@@ -402,23 +1318,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | |||
402 | return NULL; | 1318 | return NULL; |
403 | } | 1319 | } |
404 | 1320 | ||
405 | static void add_new_bitmap(struct btrfs_block_group_cache *block_group, | 1321 | static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, |
406 | struct btrfs_free_space *info, u64 offset) | 1322 | struct btrfs_free_space *info, u64 offset) |
407 | { | 1323 | { |
408 | u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; | 1324 | info->offset = offset_to_bitmap(ctl, offset); |
409 | int max_bitmaps = (int)div64_u64(block_group->key.offset + | ||
410 | bytes_per_bg - 1, bytes_per_bg); | ||
411 | BUG_ON(block_group->total_bitmaps >= max_bitmaps); | ||
412 | |||
413 | info->offset = offset_to_bitmap(block_group, offset); | ||
414 | info->bytes = 0; | 1325 | info->bytes = 0; |
415 | link_free_space(block_group, info); | 1326 | link_free_space(ctl, info); |
416 | block_group->total_bitmaps++; | 1327 | ctl->total_bitmaps++; |
417 | 1328 | ||
418 | recalculate_thresholds(block_group); | 1329 | ctl->op->recalc_thresholds(ctl); |
419 | } | 1330 | } |
420 | 1331 | ||
421 | static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, | 1332 | static void free_bitmap(struct btrfs_free_space_ctl *ctl, |
1333 | struct btrfs_free_space *bitmap_info) | ||
1334 | { | ||
1335 | unlink_free_space(ctl, bitmap_info); | ||
1336 | kfree(bitmap_info->bitmap); | ||
1337 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); | ||
1338 | ctl->total_bitmaps--; | ||
1339 | ctl->op->recalc_thresholds(ctl); | ||
1340 | } | ||
1341 | |||
1342 | static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, | ||
422 | struct btrfs_free_space *bitmap_info, | 1343 | struct btrfs_free_space *bitmap_info, |
423 | u64 *offset, u64 *bytes) | 1344 | u64 *offset, u64 *bytes) |
424 | { | 1345 | { |
@@ -427,8 +1348,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro | |||
427 | int ret; | 1348 | int ret; |
428 | 1349 | ||
429 | again: | 1350 | again: |
430 | end = bitmap_info->offset + | 1351 | end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; |
431 | (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; | ||
432 | 1352 | ||
433 | /* | 1353 | /* |
434 | * XXX - this can go away after a few releases. | 1354 | * XXX - this can go away after a few releases. |
@@ -442,29 +1362,23 @@ again: | |||
442 | */ | 1362 | */ |
443 | search_start = *offset; | 1363 | search_start = *offset; |
444 | search_bytes = *bytes; | 1364 | search_bytes = *bytes; |
445 | ret = search_bitmap(block_group, bitmap_info, &search_start, | 1365 | search_bytes = min(search_bytes, end - search_start + 1); |
446 | &search_bytes); | 1366 | ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); |
447 | BUG_ON(ret < 0 || search_start != *offset); | 1367 | BUG_ON(ret < 0 || search_start != *offset); |
448 | 1368 | ||
449 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { | 1369 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { |
450 | bitmap_clear_bits(block_group, bitmap_info, *offset, | 1370 | bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); |
451 | end - *offset + 1); | ||
452 | *bytes -= end - *offset + 1; | 1371 | *bytes -= end - *offset + 1; |
453 | *offset = end + 1; | 1372 | *offset = end + 1; |
454 | } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { | 1373 | } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { |
455 | bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); | 1374 | bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes); |
456 | *bytes = 0; | 1375 | *bytes = 0; |
457 | } | 1376 | } |
458 | 1377 | ||
459 | if (*bytes) { | 1378 | if (*bytes) { |
460 | struct rb_node *next = rb_next(&bitmap_info->offset_index); | 1379 | struct rb_node *next = rb_next(&bitmap_info->offset_index); |
461 | if (!bitmap_info->bytes) { | 1380 | if (!bitmap_info->bytes) |
462 | unlink_free_space(block_group, bitmap_info); | 1381 | free_bitmap(ctl, bitmap_info); |
463 | kfree(bitmap_info->bitmap); | ||
464 | kfree(bitmap_info); | ||
465 | block_group->total_bitmaps--; | ||
466 | recalculate_thresholds(block_group); | ||
467 | } | ||
468 | 1382 | ||
469 | /* | 1383 | /* |
470 | * no entry after this bitmap, but we still have bytes to | 1384 | * no entry after this bitmap, but we still have bytes to |
@@ -491,38 +1405,59 @@ again: | |||
491 | */ | 1405 | */ |
492 | search_start = *offset; | 1406 | search_start = *offset; |
493 | search_bytes = *bytes; | 1407 | search_bytes = *bytes; |
494 | ret = search_bitmap(block_group, bitmap_info, &search_start, | 1408 | ret = search_bitmap(ctl, bitmap_info, &search_start, |
495 | &search_bytes); | 1409 | &search_bytes); |
496 | if (ret < 0 || search_start != *offset) | 1410 | if (ret < 0 || search_start != *offset) |
497 | return -EAGAIN; | 1411 | return -EAGAIN; |
498 | 1412 | ||
499 | goto again; | 1413 | goto again; |
500 | } else if (!bitmap_info->bytes) { | 1414 | } else if (!bitmap_info->bytes) |
501 | unlink_free_space(block_group, bitmap_info); | 1415 | free_bitmap(ctl, bitmap_info); |
502 | kfree(bitmap_info->bitmap); | ||
503 | kfree(bitmap_info); | ||
504 | block_group->total_bitmaps--; | ||
505 | recalculate_thresholds(block_group); | ||
506 | } | ||
507 | 1416 | ||
508 | return 0; | 1417 | return 0; |
509 | } | 1418 | } |
510 | 1419 | ||
511 | static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | 1420 | static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, |
512 | struct btrfs_free_space *info) | 1421 | struct btrfs_free_space *info, u64 offset, |
1422 | u64 bytes) | ||
513 | { | 1423 | { |
514 | struct btrfs_free_space *bitmap_info; | 1424 | u64 bytes_to_set = 0; |
515 | int added = 0; | 1425 | u64 end; |
516 | u64 bytes, offset, end; | 1426 | |
517 | int ret; | 1427 | end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); |
1428 | |||
1429 | bytes_to_set = min(end - offset, bytes); | ||
1430 | |||
1431 | bitmap_set_bits(ctl, info, offset, bytes_to_set); | ||
1432 | |||
1433 | return bytes_to_set; | ||
1434 | |||
1435 | } | ||
1436 | |||
1437 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, | ||
1438 | struct btrfs_free_space *info) | ||
1439 | { | ||
1440 | struct btrfs_block_group_cache *block_group = ctl->private; | ||
518 | 1441 | ||
519 | /* | 1442 | /* |
520 | * If we are below the extents threshold then we can add this as an | 1443 | * If we are below the extents threshold then we can add this as an |
521 | * extent, and don't have to deal with the bitmap | 1444 | * extent, and don't have to deal with the bitmap |
522 | */ | 1445 | */ |
523 | if (block_group->free_extents < block_group->extents_thresh && | 1446 | if (ctl->free_extents < ctl->extents_thresh) { |
524 | info->bytes > block_group->sectorsize * 4) | 1447 | /* |
525 | return 0; | 1448 | * If this block group has some small extents we don't want to |
1449 | * use up all of our free slots in the cache with them, we want | ||
1450 | * to reserve them to larger extents, however if we have plent | ||
1451 | * to reserve them for larger extents. However, if we have plenty | ||
1452 | * of cache left then go ahead and add them; no sense in adding | ||
1453 | */ | ||
1454 | if (info->bytes <= block_group->sectorsize * 4) { | ||
1455 | if (ctl->free_extents * 2 <= ctl->extents_thresh) | ||
1456 | return false; | ||
1457 | } else { | ||
1458 | return false; | ||
1459 | } | ||
1460 | } | ||
526 | 1461 | ||
527 | /* | 1462 | /* |
528 | * some block groups are so tiny they can't be enveloped by a bitmap, so | 1463 | * some block groups are so tiny they can't be enveloped by a bitmap, so |
@@ -530,35 +1465,85 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | |||
530 | */ | 1465 | */ |
531 | if (BITS_PER_BITMAP * block_group->sectorsize > | 1466 | if (BITS_PER_BITMAP * block_group->sectorsize > |
532 | block_group->key.offset) | 1467 | block_group->key.offset) |
533 | return 0; | 1468 | return false; |
1469 | |||
1470 | return true; | ||
1471 | } | ||
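
The new use_bitmap() callback concentrates the extent-versus-bitmap policy in one place: while there is headroom below extents_thresh, extents stay as extents, and small extents are only pushed into a bitmap once more than half of the extent budget is already used; block groups too small to cover a single bitmap always use extents. A compact restatement of that decision, with the control structure simplified to a sketch-only struct rather than the real btrfs_free_space_ctl:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAP  (4096ULL * 8)

/* simplified stand-in for btrfs_free_space_ctl plus the block group fields it reads */
struct ctl_sketch {
	uint64_t free_extents;
	uint64_t extents_thresh;
	uint64_t sectorsize;
	uint64_t bg_size;        /* block_group->key.offset */
};

static bool use_bitmap_sketch(const struct ctl_sketch *ctl, uint64_t info_bytes)
{
	if (ctl->free_extents < ctl->extents_thresh) {
		/* below the threshold: large extents always stay as extents... */
		if (info_bytes > ctl->sectorsize * 4)
			return false;
		/* ...and small ones too, until half the extent budget is used */
		if (ctl->free_extents * 2 <= ctl->extents_thresh)
			return false;
	}

	/* a bitmap would not even fit inside this block group */
	if (BITS_PER_BITMAP * ctl->sectorsize > ctl->bg_size)
		return false;

	return true;
}

int main(void)
{
	struct ctl_sketch ctl = { .free_extents = 10, .extents_thresh = 100,
				  .sectorsize = 4096, .bg_size = 1024ULL * 1024 * 1024 };

	printf("small extent -> %s\n", use_bitmap_sketch(&ctl, 8192) ? "bitmap" : "extent");
	return 0;
}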
1472 | |||
1473 | static struct btrfs_free_space_op free_space_op = { | ||
1474 | .recalc_thresholds = recalculate_thresholds, | ||
1475 | .use_bitmap = use_bitmap, | ||
1476 | }; | ||
1477 | |||
1478 | static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, | ||
1479 | struct btrfs_free_space *info) | ||
1480 | { | ||
1481 | struct btrfs_free_space *bitmap_info; | ||
1482 | struct btrfs_block_group_cache *block_group = NULL; | ||
1483 | int added = 0; | ||
1484 | u64 bytes, offset, bytes_added; | ||
1485 | int ret; | ||
534 | 1486 | ||
535 | bytes = info->bytes; | 1487 | bytes = info->bytes; |
536 | offset = info->offset; | 1488 | offset = info->offset; |
537 | 1489 | ||
1490 | if (!ctl->op->use_bitmap(ctl, info)) | ||
1491 | return 0; | ||
1492 | |||
1493 | if (ctl->op == &free_space_op) | ||
1494 | block_group = ctl->private; | ||
538 | again: | 1495 | again: |
539 | bitmap_info = tree_search_offset(block_group, | 1496 | /* |
540 | offset_to_bitmap(block_group, offset), | 1497 | * Since we link bitmaps right into the cluster we need to see if we |
1498 | * have a cluster here, and if so and it has our bitmap we need to add | ||
1499 | * the free space to that bitmap. | ||
1500 | */ | ||
1501 | if (block_group && !list_empty(&block_group->cluster_list)) { | ||
1502 | struct btrfs_free_cluster *cluster; | ||
1503 | struct rb_node *node; | ||
1504 | struct btrfs_free_space *entry; | ||
1505 | |||
1506 | cluster = list_entry(block_group->cluster_list.next, | ||
1507 | struct btrfs_free_cluster, | ||
1508 | block_group_list); | ||
1509 | spin_lock(&cluster->lock); | ||
1510 | node = rb_first(&cluster->root); | ||
1511 | if (!node) { | ||
1512 | spin_unlock(&cluster->lock); | ||
1513 | goto no_cluster_bitmap; | ||
1514 | } | ||
1515 | |||
1516 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1517 | if (!entry->bitmap) { | ||
1518 | spin_unlock(&cluster->lock); | ||
1519 | goto no_cluster_bitmap; | ||
1520 | } | ||
1521 | |||
1522 | if (entry->offset == offset_to_bitmap(ctl, offset)) { | ||
1523 | bytes_added = add_bytes_to_bitmap(ctl, entry, | ||
1524 | offset, bytes); | ||
1525 | bytes -= bytes_added; | ||
1526 | offset += bytes_added; | ||
1527 | } | ||
1528 | spin_unlock(&cluster->lock); | ||
1529 | if (!bytes) { | ||
1530 | ret = 1; | ||
1531 | goto out; | ||
1532 | } | ||
1533 | } | ||
1534 | |||
1535 | no_cluster_bitmap: | ||
1536 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), | ||
541 | 1, 0); | 1537 | 1, 0); |
542 | if (!bitmap_info) { | 1538 | if (!bitmap_info) { |
543 | BUG_ON(added); | 1539 | BUG_ON(added); |
544 | goto new_bitmap; | 1540 | goto new_bitmap; |
545 | } | 1541 | } |
546 | 1542 | ||
547 | end = bitmap_info->offset + | 1543 | bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); |
548 | (u64)(BITS_PER_BITMAP * block_group->sectorsize); | 1544 | bytes -= bytes_added; |
549 | 1545 | offset += bytes_added; | |
550 | if (offset >= bitmap_info->offset && offset + bytes > end) { | 1546 | added = 0; |
551 | bitmap_set_bits(block_group, bitmap_info, offset, | ||
552 | end - offset); | ||
553 | bytes -= end - offset; | ||
554 | offset = end; | ||
555 | added = 0; | ||
556 | } else if (offset >= bitmap_info->offset && offset + bytes <= end) { | ||
557 | bitmap_set_bits(block_group, bitmap_info, offset, bytes); | ||
558 | bytes = 0; | ||
559 | } else { | ||
560 | BUG(); | ||
561 | } | ||
562 | 1547 | ||
563 | if (!bytes) { | 1548 | if (!bytes) { |
564 | ret = 1; | 1549 | ret = 1; |
@@ -568,19 +1553,19 @@ again: | |||
568 | 1553 | ||
569 | new_bitmap: | 1554 | new_bitmap: |
570 | if (info && info->bitmap) { | 1555 | if (info && info->bitmap) { |
571 | add_new_bitmap(block_group, info, offset); | 1556 | add_new_bitmap(ctl, info, offset); |
572 | added = 1; | 1557 | added = 1; |
573 | info = NULL; | 1558 | info = NULL; |
574 | goto again; | 1559 | goto again; |
575 | } else { | 1560 | } else { |
576 | spin_unlock(&block_group->tree_lock); | 1561 | spin_unlock(&ctl->tree_lock); |
577 | 1562 | ||
578 | /* no pre-allocated info, allocate a new one */ | 1563 | /* no pre-allocated info, allocate a new one */ |
579 | if (!info) { | 1564 | if (!info) { |
580 | info = kzalloc(sizeof(struct btrfs_free_space), | 1565 | info = kmem_cache_zalloc(btrfs_free_space_cachep, |
581 | GFP_NOFS); | 1566 | GFP_NOFS); |
582 | if (!info) { | 1567 | if (!info) { |
583 | spin_lock(&block_group->tree_lock); | 1568 | spin_lock(&ctl->tree_lock); |
584 | ret = -ENOMEM; | 1569 | ret = -ENOMEM; |
585 | goto out; | 1570 | goto out; |
586 | } | 1571 | } |
@@ -588,7 +1573,7 @@ new_bitmap: | |||
588 | 1573 | ||
589 | /* allocate the bitmap */ | 1574 | /* allocate the bitmap */ |
590 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | 1575 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); |
591 | spin_lock(&block_group->tree_lock); | 1576 | spin_lock(&ctl->tree_lock); |
592 | if (!info->bitmap) { | 1577 | if (!info->bitmap) { |
593 | ret = -ENOMEM; | 1578 | ret = -ENOMEM; |
594 | goto out; | 1579 | goto out; |
@@ -600,77 +1585,94 @@ out: | |||
600 | if (info) { | 1585 | if (info) { |
601 | if (info->bitmap) | 1586 | if (info->bitmap) |
602 | kfree(info->bitmap); | 1587 | kfree(info->bitmap); |
603 | kfree(info); | 1588 | kmem_cache_free(btrfs_free_space_cachep, info); |
604 | } | 1589 | } |
605 | 1590 | ||
606 | return ret; | 1591 | return ret; |
607 | } | 1592 | } |
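
The goto-again loop in insert_into_bitmap() relies on add_bytes_to_bitmap() only setting bits inside the region covered by one bitmap entry, so a range that spills past that region comes back with bytes left over and the next pass lands in the following bitmap. A minimal stand-alone C sketch of that clamping arithmetic, assuming a 4K page and 4K sector size rather than values read from a real filesystem:

    #include <stdio.h>
    #include <stdint.h>

    #define BITMAP_PAGE_SIZE  4096ULL                 /* assumed page size */
    #define BITS_PER_BITMAP   (BITMAP_PAGE_SIZE * 8)

    /* round down to the start of the bitmap region that covers @offset */
    static uint64_t offset_to_bitmap(uint64_t sectorsize, uint64_t offset)
    {
        uint64_t bytes_per_bitmap = BITS_PER_BITMAP * sectorsize;

        return (offset / bytes_per_bitmap) * bytes_per_bitmap;
    }

    int main(void)
    {
        uint64_t sectorsize = 4096;
        uint64_t coverage = BITS_PER_BITMAP * sectorsize;   /* 128MiB here */
        uint64_t offset = coverage - 8 * sectorsize;        /* near the end */
        uint64_t bytes = 32 * sectorsize;                   /* spills over */

        while (bytes) {
            uint64_t bmap_start = offset_to_bitmap(sectorsize, offset);
            uint64_t bmap_end = bmap_start + coverage;
            uint64_t added = bytes;

            if (offset + bytes > bmap_end)
                added = bmap_end - offset;   /* clamp to this bitmap */

            printf("bitmap at %llu: set %llu bytes at %llu\n",
                   (unsigned long long)bmap_start,
                   (unsigned long long)added,
                   (unsigned long long)offset);
            offset += added;
            bytes -= added;
        }
        return 0;
    }

The first pass consumes the 8 sectors still covered by the current bitmap; the remaining 24 sectors are handled on the second pass, mirroring the bytes -= bytes_added / offset += bytes_added bookkeeping above.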
608 | 1593 | ||
609 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 1594 | static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, |
610 | u64 offset, u64 bytes) | 1595 | struct btrfs_free_space *info, bool update_stat) |
611 | { | 1596 | { |
612 | struct btrfs_free_space *right_info = NULL; | 1597 | struct btrfs_free_space *left_info; |
613 | struct btrfs_free_space *left_info = NULL; | 1598 | struct btrfs_free_space *right_info; |
614 | struct btrfs_free_space *info = NULL; | 1599 | bool merged = false; |
615 | int ret = 0; | 1600 | u64 offset = info->offset; |
616 | 1601 | u64 bytes = info->bytes; | |
617 | info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); | ||
618 | if (!info) | ||
619 | return -ENOMEM; | ||
620 | |||
621 | info->offset = offset; | ||
622 | info->bytes = bytes; | ||
623 | |||
624 | spin_lock(&block_group->tree_lock); | ||
625 | 1602 | ||
626 | /* | 1603 | /* |
627 | * first we want to see if there is free space adjacent to the range we | 1604 | * first we want to see if there is free space adjacent to the range we |
628 | * are adding, if there is remove that struct and add a new one to | 1605 | * are adding, if there is remove that struct and add a new one to |
629 | * cover the entire range | 1606 | * cover the entire range |
630 | */ | 1607 | */ |
631 | right_info = tree_search_offset(block_group, offset + bytes, 0, 0); | 1608 | right_info = tree_search_offset(ctl, offset + bytes, 0, 0); |
632 | if (right_info && rb_prev(&right_info->offset_index)) | 1609 | if (right_info && rb_prev(&right_info->offset_index)) |
633 | left_info = rb_entry(rb_prev(&right_info->offset_index), | 1610 | left_info = rb_entry(rb_prev(&right_info->offset_index), |
634 | struct btrfs_free_space, offset_index); | 1611 | struct btrfs_free_space, offset_index); |
635 | else | 1612 | else |
636 | left_info = tree_search_offset(block_group, offset - 1, 0, 0); | 1613 | left_info = tree_search_offset(ctl, offset - 1, 0, 0); |
637 | |||
638 | /* | ||
639 | * If there was no extent directly to the left or right of this new | ||
640 | * extent then we know we're going to have to allocate a new extent, so | ||
641 | * before we do that see if we need to drop this into a bitmap | ||
642 | */ | ||
643 | if ((!left_info || left_info->bitmap) && | ||
644 | (!right_info || right_info->bitmap)) { | ||
645 | ret = insert_into_bitmap(block_group, info); | ||
646 | |||
647 | if (ret < 0) { | ||
648 | goto out; | ||
649 | } else if (ret) { | ||
650 | ret = 0; | ||
651 | goto out; | ||
652 | } | ||
653 | } | ||
654 | 1614 | ||
655 | if (right_info && !right_info->bitmap) { | 1615 | if (right_info && !right_info->bitmap) { |
656 | unlink_free_space(block_group, right_info); | 1616 | if (update_stat) |
1617 | unlink_free_space(ctl, right_info); | ||
1618 | else | ||
1619 | __unlink_free_space(ctl, right_info); | ||
657 | info->bytes += right_info->bytes; | 1620 | info->bytes += right_info->bytes; |
658 | kfree(right_info); | 1621 | kmem_cache_free(btrfs_free_space_cachep, right_info); |
1622 | merged = true; | ||
659 | } | 1623 | } |
660 | 1624 | ||
661 | if (left_info && !left_info->bitmap && | 1625 | if (left_info && !left_info->bitmap && |
662 | left_info->offset + left_info->bytes == offset) { | 1626 | left_info->offset + left_info->bytes == offset) { |
663 | unlink_free_space(block_group, left_info); | 1627 | if (update_stat) |
1628 | unlink_free_space(ctl, left_info); | ||
1629 | else | ||
1630 | __unlink_free_space(ctl, left_info); | ||
664 | info->offset = left_info->offset; | 1631 | info->offset = left_info->offset; |
665 | info->bytes += left_info->bytes; | 1632 | info->bytes += left_info->bytes; |
666 | kfree(left_info); | 1633 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
1634 | merged = true; | ||
667 | } | 1635 | } |
668 | 1636 | ||
669 | ret = link_free_space(block_group, info); | 1637 | return merged; |
1638 | } | ||
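
try_merge_free_space() only coalesces with plain extents, never with bitmap entries: a right neighbour that begins exactly where the new range ends is swallowed first, then a left neighbour whose end touches the new range's start. A simplified stand-alone C model of that coalescing, with two structs standing in for the offset-sorted rbtree:

    #include <stdio.h>
    #include <stdint.h>

    struct span { uint64_t offset, bytes; };

    static void merge(struct span *info, struct span *left, struct span *right)
    {
        if (right && info->offset + info->bytes == right->offset) {
            info->bytes += right->bytes;        /* swallow the right neighbour */
            right->bytes = 0;
        }
        if (left && left->offset + left->bytes == info->offset) {
            info->offset = left->offset;        /* swallow the left neighbour */
            info->bytes += left->bytes;
            left->bytes = 0;
        }
    }

    int main(void)
    {
        struct span left = { 0, 4096 }, right = { 8192, 4096 };
        struct span info = { 4096, 4096 };

        merge(&info, &left, &right);
        /* prints: merged: offset=0 bytes=12288 */
        printf("merged: offset=%llu bytes=%llu\n",
               (unsigned long long)info.offset,
               (unsigned long long)info.bytes);
        return 0;
    }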
1639 | |||
1640 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, | ||
1641 | u64 offset, u64 bytes) | ||
1642 | { | ||
1643 | struct btrfs_free_space *info; | ||
1644 | int ret = 0; | ||
1645 | |||
1646 | info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS); | ||
1647 | if (!info) | ||
1648 | return -ENOMEM; | ||
1649 | |||
1650 | info->offset = offset; | ||
1651 | info->bytes = bytes; | ||
1652 | |||
1653 | spin_lock(&ctl->tree_lock); | ||
1654 | |||
1655 | if (try_merge_free_space(ctl, info, true)) | ||
1656 | goto link; | ||
1657 | |||
1658 | /* | ||
1659 | 	 * There was no extent directly to the left or right of this new | ||
1660 | 	 * extent, so we know we're going to have to allocate a new one; | ||
1661 | 	 * before we do that, see if we need to drop this into a bitmap | ||
1662 | */ | ||
1663 | ret = insert_into_bitmap(ctl, info); | ||
1664 | if (ret < 0) { | ||
1665 | goto out; | ||
1666 | } else if (ret) { | ||
1667 | ret = 0; | ||
1668 | goto out; | ||
1669 | } | ||
1670 | link: | ||
1671 | ret = link_free_space(ctl, info); | ||
670 | if (ret) | 1672 | if (ret) |
671 | kfree(info); | 1673 | kmem_cache_free(btrfs_free_space_cachep, info); |
672 | out: | 1674 | out: |
673 | spin_unlock(&block_group->tree_lock); | 1675 | spin_unlock(&ctl->tree_lock); |
674 | 1676 | ||
675 | if (ret) { | 1677 | if (ret) { |
676 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); | 1678 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); |
@@ -683,21 +1685,21 @@ out: | |||
683 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 1685 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
684 | u64 offset, u64 bytes) | 1686 | u64 offset, u64 bytes) |
685 | { | 1687 | { |
1688 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
686 | struct btrfs_free_space *info; | 1689 | struct btrfs_free_space *info; |
687 | struct btrfs_free_space *next_info = NULL; | 1690 | struct btrfs_free_space *next_info = NULL; |
688 | int ret = 0; | 1691 | int ret = 0; |
689 | 1692 | ||
690 | spin_lock(&block_group->tree_lock); | 1693 | spin_lock(&ctl->tree_lock); |
691 | 1694 | ||
692 | again: | 1695 | again: |
693 | info = tree_search_offset(block_group, offset, 0, 0); | 1696 | info = tree_search_offset(ctl, offset, 0, 0); |
694 | if (!info) { | 1697 | if (!info) { |
695 | /* | 1698 | /* |
696 | * oops didn't find an extent that matched the space we wanted | 1699 | * oops didn't find an extent that matched the space we wanted |
697 | * to remove, look for a bitmap instead | 1700 | * to remove, look for a bitmap instead |
698 | */ | 1701 | */ |
699 | info = tree_search_offset(block_group, | 1702 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
700 | offset_to_bitmap(block_group, offset), | ||
701 | 1, 0); | 1703 | 1, 0); |
702 | if (!info) { | 1704 | if (!info) { |
703 | WARN_ON(1); | 1705 | WARN_ON(1); |
@@ -712,8 +1714,8 @@ again: | |||
712 | offset_index); | 1714 | offset_index); |
713 | 1715 | ||
714 | if (next_info->bitmap) | 1716 | if (next_info->bitmap) |
715 | end = next_info->offset + BITS_PER_BITMAP * | 1717 | end = next_info->offset + |
716 | block_group->sectorsize - 1; | 1718 | BITS_PER_BITMAP * ctl->unit - 1; |
717 | else | 1719 | else |
718 | end = next_info->offset + next_info->bytes; | 1720 | end = next_info->offset + next_info->bytes; |
719 | 1721 | ||
@@ -733,20 +1735,20 @@ again: | |||
733 | } | 1735 | } |
734 | 1736 | ||
735 | if (info->bytes == bytes) { | 1737 | if (info->bytes == bytes) { |
736 | unlink_free_space(block_group, info); | 1738 | unlink_free_space(ctl, info); |
737 | if (info->bitmap) { | 1739 | if (info->bitmap) { |
738 | kfree(info->bitmap); | 1740 | kfree(info->bitmap); |
739 | block_group->total_bitmaps--; | 1741 | ctl->total_bitmaps--; |
740 | } | 1742 | } |
741 | kfree(info); | 1743 | kmem_cache_free(btrfs_free_space_cachep, info); |
742 | goto out_lock; | 1744 | goto out_lock; |
743 | } | 1745 | } |
744 | 1746 | ||
745 | if (!info->bitmap && info->offset == offset) { | 1747 | if (!info->bitmap && info->offset == offset) { |
746 | unlink_free_space(block_group, info); | 1748 | unlink_free_space(ctl, info); |
747 | info->offset += bytes; | 1749 | info->offset += bytes; |
748 | info->bytes -= bytes; | 1750 | info->bytes -= bytes; |
749 | link_free_space(block_group, info); | 1751 | link_free_space(ctl, info); |
750 | goto out_lock; | 1752 | goto out_lock; |
751 | } | 1753 | } |
752 | 1754 | ||
@@ -760,13 +1762,13 @@ again: | |||
760 | * first unlink the old info and then | 1762 | * first unlink the old info and then |
761 | * insert it again after the hole we're creating | 1763 | * insert it again after the hole we're creating |
762 | */ | 1764 | */ |
763 | unlink_free_space(block_group, info); | 1765 | unlink_free_space(ctl, info); |
764 | if (offset + bytes < info->offset + info->bytes) { | 1766 | if (offset + bytes < info->offset + info->bytes) { |
765 | u64 old_end = info->offset + info->bytes; | 1767 | u64 old_end = info->offset + info->bytes; |
766 | 1768 | ||
767 | info->offset = offset + bytes; | 1769 | info->offset = offset + bytes; |
768 | info->bytes = old_end - info->offset; | 1770 | info->bytes = old_end - info->offset; |
769 | ret = link_free_space(block_group, info); | 1771 | ret = link_free_space(ctl, info); |
770 | WARN_ON(ret); | 1772 | WARN_ON(ret); |
771 | if (ret) | 1773 | if (ret) |
772 | goto out_lock; | 1774 | goto out_lock; |
@@ -774,9 +1776,9 @@ again: | |||
774 | /* the hole we're creating ends at the end | 1776 | /* the hole we're creating ends at the end |
775 | * of the info struct, just free the info | 1777 | * of the info struct, just free the info |
776 | */ | 1778 | */ |
777 | kfree(info); | 1779 | kmem_cache_free(btrfs_free_space_cachep, info); |
778 | } | 1780 | } |
779 | spin_unlock(&block_group->tree_lock); | 1781 | spin_unlock(&ctl->tree_lock); |
780 | 1782 | ||
781 | /* step two, insert a new info struct to cover | 1783 | /* step two, insert a new info struct to cover |
782 | * anything before the hole | 1784 | * anything before the hole |
@@ -787,12 +1789,12 @@ again: | |||
787 | goto out; | 1789 | goto out; |
788 | } | 1790 | } |
789 | 1791 | ||
790 | ret = remove_from_bitmap(block_group, info, &offset, &bytes); | 1792 | ret = remove_from_bitmap(ctl, info, &offset, &bytes); |
791 | if (ret == -EAGAIN) | 1793 | if (ret == -EAGAIN) |
792 | goto again; | 1794 | goto again; |
793 | BUG_ON(ret); | 1795 | BUG_ON(ret); |
794 | out_lock: | 1796 | out_lock: |
795 | spin_unlock(&block_group->tree_lock); | 1797 | spin_unlock(&ctl->tree_lock); |
796 | out: | 1798 | out: |
797 | return ret; | 1799 | return ret; |
798 | } | 1800 | } |
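
In the plain-extent case above, removing a sub-range from the middle of an entry leaves a piece on each side: the tail is re-linked immediately and the head is re-added by the "step two" call that follows. A short stand-alone C sketch of just that splitting arithmetic, with no locking or rbtree handling:

    #include <stdio.h>
    #include <stdint.h>

    struct span { uint64_t offset, bytes; };

    /* returns how many pieces survive (0, 1 or 2) */
    static int punch_hole(struct span ext, uint64_t off, uint64_t bytes,
                          struct span out[2])
    {
        int n = 0;
        uint64_t ext_end = ext.offset + ext.bytes;
        uint64_t hole_end = off + bytes;

        if (ext.offset < off) {          /* piece before the hole */
            out[n].offset = ext.offset;
            out[n].bytes = off - ext.offset;
            n++;
        }
        if (hole_end < ext_end) {        /* piece after the hole */
            out[n].offset = hole_end;
            out[n].bytes = ext_end - hole_end;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct span ext = { 1 << 20, 1 << 20 };   /* 1MiB free at 1MiB */
        struct span out[2];
        int i, n = punch_hole(ext, (1 << 20) + 4096, 8192, out);

        for (i = 0; i < n; i++)
            printf("left over: offset=%llu bytes=%llu\n",
                   (unsigned long long)out[i].offset,
                   (unsigned long long)out[i].bytes);
        return 0;
    }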
@@ -800,11 +1802,12 @@ out: | |||
800 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | 1802 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
801 | u64 bytes) | 1803 | u64 bytes) |
802 | { | 1804 | { |
1805 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
803 | struct btrfs_free_space *info; | 1806 | struct btrfs_free_space *info; |
804 | struct rb_node *n; | 1807 | struct rb_node *n; |
805 | int count = 0; | 1808 | int count = 0; |
806 | 1809 | ||
807 | for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { | 1810 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { |
808 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 1811 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
809 | if (info->bytes >= bytes) | 1812 | if (info->bytes >= bytes) |
810 | count++; | 1813 | count++; |
@@ -819,19 +1822,23 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | |||
819 | "\n", count); | 1822 | "\n", count); |
820 | } | 1823 | } |
821 | 1824 | ||
822 | u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) | 1825 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) |
823 | { | 1826 | { |
824 | struct btrfs_free_space *info; | 1827 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
825 | struct rb_node *n; | ||
826 | u64 ret = 0; | ||
827 | 1828 | ||
828 | for (n = rb_first(&block_group->free_space_offset); n; | 1829 | spin_lock_init(&ctl->tree_lock); |
829 | n = rb_next(n)) { | 1830 | ctl->unit = block_group->sectorsize; |
830 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 1831 | ctl->start = block_group->key.objectid; |
831 | ret += info->bytes; | 1832 | ctl->private = block_group; |
832 | } | 1833 | ctl->op = &free_space_op; |
833 | 1834 | ||
834 | return ret; | 1835 | /* |
1836 | * we only want to have 32k of ram per block group for keeping | ||
1837 | * track of free space, and if we pass 1/2 of that we want to | ||
1838 | * start converting things over to using bitmaps | ||
1839 | */ | ||
1840 | ctl->extents_thresh = ((1024 * 32) / 2) / | ||
1841 | sizeof(struct btrfs_free_space); | ||
835 | } | 1842 | } |
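
The threshold works out as half of the 32K budget divided by the size of one extent entry. With an assumed sizeof(struct btrfs_free_space) of 48 bytes (the real size depends on kernel configuration) that is 16384 / 48, roughly 341 extent entries per block group before the ctl starts pushing space into bitmaps. A trivial check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ram_budget = 32 * 1024;   /* 32k per block group */
        unsigned long entry_size = 48;          /* assumed sizeof(entry) */
        unsigned long extents_thresh = (ram_budget / 2) / entry_size;

        printf("switch to bitmaps after %lu extent entries\n", extents_thresh);
        return 0;
    }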
836 | 1843 | ||
837 | /* | 1844 | /* |
@@ -845,31 +1852,31 @@ __btrfs_return_cluster_to_free_space( | |||
845 | struct btrfs_block_group_cache *block_group, | 1852 | struct btrfs_block_group_cache *block_group, |
846 | struct btrfs_free_cluster *cluster) | 1853 | struct btrfs_free_cluster *cluster) |
847 | { | 1854 | { |
1855 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
848 | struct btrfs_free_space *entry; | 1856 | struct btrfs_free_space *entry; |
849 | struct rb_node *node; | 1857 | struct rb_node *node; |
850 | bool bitmap; | ||
851 | 1858 | ||
852 | spin_lock(&cluster->lock); | 1859 | spin_lock(&cluster->lock); |
853 | if (cluster->block_group != block_group) | 1860 | if (cluster->block_group != block_group) |
854 | goto out; | 1861 | goto out; |
855 | 1862 | ||
856 | bitmap = cluster->points_to_bitmap; | ||
857 | cluster->block_group = NULL; | 1863 | cluster->block_group = NULL; |
858 | cluster->window_start = 0; | 1864 | cluster->window_start = 0; |
859 | list_del_init(&cluster->block_group_list); | 1865 | list_del_init(&cluster->block_group_list); |
860 | cluster->points_to_bitmap = false; | ||
861 | |||
862 | if (bitmap) | ||
863 | goto out; | ||
864 | 1866 | ||
865 | node = rb_first(&cluster->root); | 1867 | node = rb_first(&cluster->root); |
866 | while (node) { | 1868 | while (node) { |
1869 | bool bitmap; | ||
1870 | |||
867 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 1871 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
868 | node = rb_next(&entry->offset_index); | 1872 | node = rb_next(&entry->offset_index); |
869 | rb_erase(&entry->offset_index, &cluster->root); | 1873 | rb_erase(&entry->offset_index, &cluster->root); |
870 | BUG_ON(entry->bitmap); | 1874 | |
871 | tree_insert_offset(&block_group->free_space_offset, | 1875 | bitmap = (entry->bitmap != NULL); |
872 | entry->offset, &entry->offset_index, 0); | 1876 | if (!bitmap) |
1877 | try_merge_free_space(ctl, entry, false); | ||
1878 | tree_insert_offset(&ctl->free_space_offset, | ||
1879 | entry->offset, &entry->offset_index, bitmap); | ||
873 | } | 1880 | } |
874 | cluster->root = RB_ROOT; | 1881 | cluster->root = RB_ROOT; |
875 | 1882 | ||
@@ -879,14 +1886,41 @@ out: | |||
879 | return 0; | 1886 | return 0; |
880 | } | 1887 | } |
881 | 1888 | ||
882 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | 1889 | void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) |
883 | { | 1890 | { |
884 | struct btrfs_free_space *info; | 1891 | struct btrfs_free_space *info; |
885 | struct rb_node *node; | 1892 | struct rb_node *node; |
1893 | |||
1894 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { | ||
1895 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1896 | if (!info->bitmap) { | ||
1897 | unlink_free_space(ctl, info); | ||
1898 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1899 | } else { | ||
1900 | free_bitmap(ctl, info); | ||
1901 | } | ||
1902 | if (need_resched()) { | ||
1903 | spin_unlock(&ctl->tree_lock); | ||
1904 | cond_resched(); | ||
1905 | spin_lock(&ctl->tree_lock); | ||
1906 | } | ||
1907 | } | ||
1908 | } | ||
1909 | |||
1910 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) | ||
1911 | { | ||
1912 | spin_lock(&ctl->tree_lock); | ||
1913 | __btrfs_remove_free_space_cache_locked(ctl); | ||
1914 | spin_unlock(&ctl->tree_lock); | ||
1915 | } | ||
1916 | |||
1917 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | ||
1918 | { | ||
1919 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
886 | struct btrfs_free_cluster *cluster; | 1920 | struct btrfs_free_cluster *cluster; |
887 | struct list_head *head; | 1921 | struct list_head *head; |
888 | 1922 | ||
889 | spin_lock(&block_group->tree_lock); | 1923 | spin_lock(&ctl->tree_lock); |
890 | while ((head = block_group->cluster_list.next) != | 1924 | while ((head = block_group->cluster_list.next) != |
891 | &block_group->cluster_list) { | 1925 | &block_group->cluster_list) { |
892 | cluster = list_entry(head, struct btrfs_free_cluster, | 1926 | cluster = list_entry(head, struct btrfs_free_cluster, |
@@ -895,62 +1929,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
895 | WARN_ON(cluster->block_group != block_group); | 1929 | WARN_ON(cluster->block_group != block_group); |
896 | __btrfs_return_cluster_to_free_space(block_group, cluster); | 1930 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
897 | if (need_resched()) { | 1931 | if (need_resched()) { |
898 | spin_unlock(&block_group->tree_lock); | 1932 | spin_unlock(&ctl->tree_lock); |
899 | cond_resched(); | ||
900 | spin_lock(&block_group->tree_lock); | ||
901 | } | ||
902 | } | ||
903 | |||
904 | while ((node = rb_last(&block_group->free_space_offset)) != NULL) { | ||
905 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
906 | unlink_free_space(block_group, info); | ||
907 | if (info->bitmap) | ||
908 | kfree(info->bitmap); | ||
909 | kfree(info); | ||
910 | if (need_resched()) { | ||
911 | spin_unlock(&block_group->tree_lock); | ||
912 | cond_resched(); | 1933 | cond_resched(); |
913 | spin_lock(&block_group->tree_lock); | 1934 | spin_lock(&ctl->tree_lock); |
914 | } | 1935 | } |
915 | } | 1936 | } |
1937 | __btrfs_remove_free_space_cache_locked(ctl); | ||
1938 | spin_unlock(&ctl->tree_lock); | ||
916 | 1939 | ||
917 | spin_unlock(&block_group->tree_lock); | ||
918 | } | 1940 | } |
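
Both teardown paths drop ctl->tree_lock and call cond_resched() whenever need_resched() is set, so freeing a very large cache never spins with the lock held. A rough userspace analogue of that unlock/yield/relock shape, using a pthread mutex and sched_yield() in place of the spinlock and cond_resched() (the kernel primitives behave differently; this only shows the loop structure):

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        int entries = 100000;

        pthread_mutex_lock(&tree_lock);
        while (entries--) {
            /* ... free one cache entry under the lock ... */

            if (entries % 1024 == 0) {          /* stand-in for need_resched() */
                pthread_mutex_unlock(&tree_lock);
                sched_yield();
                pthread_mutex_lock(&tree_lock);
            }
        }
        pthread_mutex_unlock(&tree_lock);
        printf("cache torn down\n");
        return 0;
    }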
919 | 1941 | ||
920 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 1942 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
921 | u64 offset, u64 bytes, u64 empty_size) | 1943 | u64 offset, u64 bytes, u64 empty_size) |
922 | { | 1944 | { |
1945 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
923 | struct btrfs_free_space *entry = NULL; | 1946 | struct btrfs_free_space *entry = NULL; |
924 | u64 bytes_search = bytes + empty_size; | 1947 | u64 bytes_search = bytes + empty_size; |
925 | u64 ret = 0; | 1948 | u64 ret = 0; |
926 | 1949 | ||
927 | spin_lock(&block_group->tree_lock); | 1950 | spin_lock(&ctl->tree_lock); |
928 | entry = find_free_space(block_group, &offset, &bytes_search, 0); | 1951 | entry = find_free_space(ctl, &offset, &bytes_search); |
929 | if (!entry) | 1952 | if (!entry) |
930 | goto out; | 1953 | goto out; |
931 | 1954 | ||
932 | ret = offset; | 1955 | ret = offset; |
933 | if (entry->bitmap) { | 1956 | if (entry->bitmap) { |
934 | bitmap_clear_bits(block_group, entry, offset, bytes); | 1957 | bitmap_clear_bits(ctl, entry, offset, bytes); |
935 | if (!entry->bytes) { | 1958 | if (!entry->bytes) |
936 | unlink_free_space(block_group, entry); | 1959 | free_bitmap(ctl, entry); |
937 | kfree(entry->bitmap); | ||
938 | kfree(entry); | ||
939 | block_group->total_bitmaps--; | ||
940 | recalculate_thresholds(block_group); | ||
941 | } | ||
942 | } else { | 1960 | } else { |
943 | unlink_free_space(block_group, entry); | 1961 | unlink_free_space(ctl, entry); |
944 | entry->offset += bytes; | 1962 | entry->offset += bytes; |
945 | entry->bytes -= bytes; | 1963 | entry->bytes -= bytes; |
946 | if (!entry->bytes) | 1964 | if (!entry->bytes) |
947 | kfree(entry); | 1965 | kmem_cache_free(btrfs_free_space_cachep, entry); |
948 | else | 1966 | else |
949 | link_free_space(block_group, entry); | 1967 | link_free_space(ctl, entry); |
950 | } | 1968 | } |
951 | 1969 | ||
952 | out: | 1970 | out: |
953 | spin_unlock(&block_group->tree_lock); | 1971 | spin_unlock(&ctl->tree_lock); |
954 | 1972 | ||
955 | return ret; | 1973 | return ret; |
956 | } | 1974 | } |
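
For a plain extent the allocation above simply hands out the front of the entry and re-links whatever is left; note that the search asks for bytes + empty_size but only bytes are actually consumed. A miniature stand-alone version of the non-bitmap branch:

    #include <stdio.h>
    #include <stdint.h>

    struct span { uint64_t offset, bytes; };

    int main(void)
    {
        struct span entry = { 4096, 16384 };   /* 16k free at offset 4k */
        uint64_t want = 8192;
        uint64_t ret = 0;

        if (entry.bytes >= want) {
            ret = entry.offset;                /* hand out the front */
            entry.offset += want;
            entry.bytes -= want;
        }

        printf("allocated %llu at %llu, %llu left at %llu\n",
               (unsigned long long)want, (unsigned long long)ret,
               (unsigned long long)entry.bytes,
               (unsigned long long)entry.offset);
        return 0;
    }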
@@ -967,6 +1985,7 @@ int btrfs_return_cluster_to_free_space( | |||
967 | struct btrfs_block_group_cache *block_group, | 1985 | struct btrfs_block_group_cache *block_group, |
968 | struct btrfs_free_cluster *cluster) | 1986 | struct btrfs_free_cluster *cluster) |
969 | { | 1987 | { |
1988 | struct btrfs_free_space_ctl *ctl; | ||
970 | int ret; | 1989 | int ret; |
971 | 1990 | ||
972 | /* first, get a safe pointer to the block group */ | 1991 | /* first, get a safe pointer to the block group */ |
@@ -985,10 +2004,12 @@ int btrfs_return_cluster_to_free_space( | |||
985 | atomic_inc(&block_group->count); | 2004 | atomic_inc(&block_group->count); |
986 | spin_unlock(&cluster->lock); | 2005 | spin_unlock(&cluster->lock); |
987 | 2006 | ||
2007 | ctl = block_group->free_space_ctl; | ||
2008 | |||
988 | /* now return any extents the cluster had on it */ | 2009 | /* now return any extents the cluster had on it */ |
989 | spin_lock(&block_group->tree_lock); | 2010 | spin_lock(&ctl->tree_lock); |
990 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); | 2011 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); |
991 | spin_unlock(&block_group->tree_lock); | 2012 | spin_unlock(&ctl->tree_lock); |
992 | 2013 | ||
993 | /* finally drop our ref */ | 2014 | /* finally drop our ref */ |
994 | btrfs_put_block_group(block_group); | 2015 | btrfs_put_block_group(block_group); |
@@ -997,48 +2018,24 @@ int btrfs_return_cluster_to_free_space( | |||
997 | 2018 | ||
998 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | 2019 | static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, |
999 | struct btrfs_free_cluster *cluster, | 2020 | struct btrfs_free_cluster *cluster, |
2021 | struct btrfs_free_space *entry, | ||
1000 | u64 bytes, u64 min_start) | 2022 | u64 bytes, u64 min_start) |
1001 | { | 2023 | { |
1002 | struct btrfs_free_space *entry; | 2024 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
1003 | int err; | 2025 | int err; |
1004 | u64 search_start = cluster->window_start; | 2026 | u64 search_start = cluster->window_start; |
1005 | u64 search_bytes = bytes; | 2027 | u64 search_bytes = bytes; |
1006 | u64 ret = 0; | 2028 | u64 ret = 0; |
1007 | 2029 | ||
1008 | spin_lock(&block_group->tree_lock); | ||
1009 | spin_lock(&cluster->lock); | ||
1010 | |||
1011 | if (!cluster->points_to_bitmap) | ||
1012 | goto out; | ||
1013 | |||
1014 | if (cluster->block_group != block_group) | ||
1015 | goto out; | ||
1016 | |||
1017 | /* | ||
1018 | * search_start is the beginning of the bitmap, but at some point it may | ||
1019 | * be a good idea to point to the actual start of the free area in the | ||
1020 | * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only | ||
1021 | * to 1 to make sure we get the bitmap entry | ||
1022 | */ | ||
1023 | entry = tree_search_offset(block_group, | ||
1024 | offset_to_bitmap(block_group, search_start), | ||
1025 | 1, 0); | ||
1026 | if (!entry || !entry->bitmap) | ||
1027 | goto out; | ||
1028 | |||
1029 | search_start = min_start; | 2030 | search_start = min_start; |
1030 | search_bytes = bytes; | 2031 | search_bytes = bytes; |
1031 | 2032 | ||
1032 | err = search_bitmap(block_group, entry, &search_start, | 2033 | err = search_bitmap(ctl, entry, &search_start, &search_bytes); |
1033 | &search_bytes); | ||
1034 | if (err) | 2034 | if (err) |
1035 | goto out; | 2035 | return 0; |
1036 | 2036 | ||
1037 | ret = search_start; | 2037 | ret = search_start; |
1038 | bitmap_clear_bits(block_group, entry, ret, bytes); | 2038 | bitmap_clear_bits(ctl, entry, ret, bytes); |
1039 | out: | ||
1040 | spin_unlock(&cluster->lock); | ||
1041 | spin_unlock(&block_group->tree_lock); | ||
1042 | 2039 | ||
1043 | return ret; | 2040 | return ret; |
1044 | } | 2041 | } |
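
search_bitmap(), which the helper above depends on, boils down to finding a run of set bits long enough for the request inside one bitmap entry. A rough stand-alone C model of that scan, with a flat byte array replacing the kernel bitmap helpers:

    #include <stdio.h>
    #include <string.h>

    #define NBITS 256

    static int test_bit(const unsigned char *map, int bit)
    {
        return (map[bit / 8] >> (bit % 8)) & 1;
    }

    static void set_bit(unsigned char *map, int bit)
    {
        map[bit / 8] |= 1 << (bit % 8);
    }

    /* find the first run of at least @need set bits, or -1 */
    static int find_run(const unsigned char *map, int need)
    {
        int i, run = 0;

        for (i = 0; i < NBITS; i++) {
            if (test_bit(map, i)) {
                if (++run == need)
                    return i - need + 1;
            } else {
                run = 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned char map[NBITS / 8];
        int bit;

        memset(map, 0, sizeof(map));
        for (bit = 40; bit < 52; bit++)     /* 12 free sectors at bit 40 */
            set_bit(map, bit);

        printf("run of 8 bits starts at bit %d\n", find_run(map, 8));
        return 0;
    }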
@@ -1052,14 +2049,11 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1052 | struct btrfs_free_cluster *cluster, u64 bytes, | 2049 | struct btrfs_free_cluster *cluster, u64 bytes, |
1053 | u64 min_start) | 2050 | u64 min_start) |
1054 | { | 2051 | { |
2052 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1055 | struct btrfs_free_space *entry = NULL; | 2053 | struct btrfs_free_space *entry = NULL; |
1056 | struct rb_node *node; | 2054 | struct rb_node *node; |
1057 | u64 ret = 0; | 2055 | u64 ret = 0; |
1058 | 2056 | ||
1059 | if (cluster->points_to_bitmap) | ||
1060 | return btrfs_alloc_from_bitmap(block_group, cluster, bytes, | ||
1061 | min_start); | ||
1062 | |||
1063 | spin_lock(&cluster->lock); | 2057 | spin_lock(&cluster->lock); |
1064 | if (bytes > cluster->max_size) | 2058 | if (bytes > cluster->max_size) |
1065 | goto out; | 2059 | goto out; |
@@ -1072,11 +2066,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1072 | goto out; | 2066 | goto out; |
1073 | 2067 | ||
1074 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 2068 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
1075 | |||
1076 | while(1) { | 2069 | while(1) { |
1077 | if (entry->bytes < bytes || entry->offset < min_start) { | 2070 | if (entry->bytes < bytes || |
1078 | struct rb_node *node; | 2071 | (!entry->bitmap && entry->offset < min_start)) { |
1079 | |||
1080 | node = rb_next(&entry->offset_index); | 2072 | node = rb_next(&entry->offset_index); |
1081 | if (!node) | 2073 | if (!node) |
1082 | break; | 2074 | break; |
@@ -1084,20 +2076,52 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1084 | offset_index); | 2076 | offset_index); |
1085 | continue; | 2077 | continue; |
1086 | } | 2078 | } |
1087 | ret = entry->offset; | ||
1088 | 2079 | ||
1089 | entry->offset += bytes; | 2080 | if (entry->bitmap) { |
1090 | entry->bytes -= bytes; | 2081 | ret = btrfs_alloc_from_bitmap(block_group, |
2082 | cluster, entry, bytes, | ||
2083 | min_start); | ||
2084 | if (ret == 0) { | ||
2085 | node = rb_next(&entry->offset_index); | ||
2086 | if (!node) | ||
2087 | break; | ||
2088 | entry = rb_entry(node, struct btrfs_free_space, | ||
2089 | offset_index); | ||
2090 | continue; | ||
2091 | } | ||
2092 | } else { | ||
1091 | 2093 | ||
1092 | if (entry->bytes == 0) { | 2094 | ret = entry->offset; |
1093 | rb_erase(&entry->offset_index, &cluster->root); | 2095 | |
1094 | kfree(entry); | 2096 | entry->offset += bytes; |
2097 | entry->bytes -= bytes; | ||
1095 | } | 2098 | } |
2099 | |||
2100 | if (entry->bytes == 0) | ||
2101 | rb_erase(&entry->offset_index, &cluster->root); | ||
1096 | break; | 2102 | break; |
1097 | } | 2103 | } |
1098 | out: | 2104 | out: |
1099 | spin_unlock(&cluster->lock); | 2105 | spin_unlock(&cluster->lock); |
1100 | 2106 | ||
2107 | if (!ret) | ||
2108 | return 0; | ||
2109 | |||
2110 | spin_lock(&ctl->tree_lock); | ||
2111 | |||
2112 | ctl->free_space -= bytes; | ||
2113 | if (entry->bytes == 0) { | ||
2114 | ctl->free_extents--; | ||
2115 | if (entry->bitmap) { | ||
2116 | kfree(entry->bitmap); | ||
2117 | ctl->total_bitmaps--; | ||
2118 | ctl->op->recalc_thresholds(ctl); | ||
2119 | } | ||
2120 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2121 | } | ||
2122 | |||
2123 | spin_unlock(&ctl->tree_lock); | ||
2124 | |||
1101 | return ret; | 2125 | return ret; |
1102 | } | 2126 | } |
1103 | 2127 | ||
@@ -1106,6 +2130,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
1106 | struct btrfs_free_cluster *cluster, | 2130 | struct btrfs_free_cluster *cluster, |
1107 | u64 offset, u64 bytes, u64 min_bytes) | 2131 | u64 offset, u64 bytes, u64 min_bytes) |
1108 | { | 2132 | { |
2133 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1109 | unsigned long next_zero; | 2134 | unsigned long next_zero; |
1110 | unsigned long i; | 2135 | unsigned long i; |
1111 | unsigned long search_bits; | 2136 | unsigned long search_bits; |
@@ -1113,12 +2138,13 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
1113 | unsigned long found_bits; | 2138 | unsigned long found_bits; |
1114 | unsigned long start = 0; | 2139 | unsigned long start = 0; |
1115 | unsigned long total_found = 0; | 2140 | unsigned long total_found = 0; |
2141 | int ret; | ||
1116 | bool found = false; | 2142 | bool found = false; |
1117 | 2143 | ||
1118 | i = offset_to_bit(entry->offset, block_group->sectorsize, | 2144 | i = offset_to_bit(entry->offset, block_group->sectorsize, |
1119 | max_t(u64, offset, entry->offset)); | 2145 | max_t(u64, offset, entry->offset)); |
1120 | search_bits = bytes_to_bits(min_bytes, block_group->sectorsize); | 2146 | search_bits = bytes_to_bits(bytes, block_group->sectorsize); |
1121 | total_bits = bytes_to_bits(bytes, block_group->sectorsize); | 2147 | total_bits = bytes_to_bits(min_bytes, block_group->sectorsize); |
1122 | 2148 | ||
1123 | again: | 2149 | again: |
1124 | found_bits = 0; | 2150 | found_bits = 0; |
@@ -1135,7 +2161,7 @@ again: | |||
1135 | } | 2161 | } |
1136 | 2162 | ||
1137 | if (!found_bits) | 2163 | if (!found_bits) |
1138 | return -1; | 2164 | return -ENOSPC; |
1139 | 2165 | ||
1140 | if (!found) { | 2166 | if (!found) { |
1141 | start = i; | 2167 | start = i; |
@@ -1159,131 +2185,67 @@ again: | |||
1159 | 2185 | ||
1160 | cluster->window_start = start * block_group->sectorsize + | 2186 | cluster->window_start = start * block_group->sectorsize + |
1161 | entry->offset; | 2187 | entry->offset; |
1162 | cluster->points_to_bitmap = true; | 2188 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2189 | ret = tree_insert_offset(&cluster->root, entry->offset, | ||
2190 | &entry->offset_index, 1); | ||
2191 | BUG_ON(ret); | ||
1163 | 2192 | ||
1164 | return 0; | 2193 | return 0; |
1165 | } | 2194 | } |
1166 | 2195 | ||
1167 | /* | 2196 | /* |
1168 | * here we try to find a cluster of blocks in a block group. The goal | 2197 | * This searches the block group for just extents to fill the cluster with. |
1169 | * is to find at least bytes free and up to empty_size + bytes free. | ||
1170 | * We might not find them all in one contiguous area. | ||
1171 | * | ||
1172 | * returns zero and sets up cluster if things worked out, otherwise | ||
1173 | * it returns -enospc | ||
1174 | */ | 2198 | */ |
1175 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | 2199 | static noinline int |
1176 | struct btrfs_root *root, | 2200 | setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, |
1177 | struct btrfs_block_group_cache *block_group, | 2201 | struct btrfs_free_cluster *cluster, |
1178 | struct btrfs_free_cluster *cluster, | 2202 | struct list_head *bitmaps, u64 offset, u64 bytes, |
1179 | u64 offset, u64 bytes, u64 empty_size) | 2203 | u64 min_bytes) |
1180 | { | 2204 | { |
2205 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2206 | struct btrfs_free_space *first = NULL; | ||
1181 | struct btrfs_free_space *entry = NULL; | 2207 | struct btrfs_free_space *entry = NULL; |
2208 | struct btrfs_free_space *prev = NULL; | ||
2209 | struct btrfs_free_space *last; | ||
1182 | struct rb_node *node; | 2210 | struct rb_node *node; |
1183 | struct btrfs_free_space *next; | ||
1184 | struct btrfs_free_space *last = NULL; | ||
1185 | u64 min_bytes; | ||
1186 | u64 window_start; | 2211 | u64 window_start; |
1187 | u64 window_free; | 2212 | u64 window_free; |
1188 | u64 max_extent = 0; | 2213 | u64 max_extent; |
1189 | bool found_bitmap = false; | 2214 | u64 max_gap = 128 * 1024; |
1190 | int ret; | ||
1191 | 2215 | ||
1192 | /* for metadata, allow allocates with more holes */ | 2216 | entry = tree_search_offset(ctl, offset, 0, 1); |
1193 | if (btrfs_test_opt(root, SSD_SPREAD)) { | 2217 | if (!entry) |
1194 | min_bytes = bytes + empty_size; | 2218 | return -ENOSPC; |
1195 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { | ||
1196 | /* | ||
1197 | * we want to do larger allocations when we are | ||
1198 | * flushing out the delayed refs, it helps prevent | ||
1199 | * making more work as we go along. | ||
1200 | */ | ||
1201 | if (trans->transaction->delayed_refs.flushing) | ||
1202 | min_bytes = max(bytes, (bytes + empty_size) >> 1); | ||
1203 | else | ||
1204 | min_bytes = max(bytes, (bytes + empty_size) >> 4); | ||
1205 | } else | ||
1206 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | ||
1207 | |||
1208 | spin_lock(&block_group->tree_lock); | ||
1209 | spin_lock(&cluster->lock); | ||
1210 | |||
1211 | /* someone already found a cluster, hooray */ | ||
1212 | if (cluster->block_group) { | ||
1213 | ret = 0; | ||
1214 | goto out; | ||
1215 | } | ||
1216 | again: | ||
1217 | entry = tree_search_offset(block_group, offset, found_bitmap, 1); | ||
1218 | if (!entry) { | ||
1219 | ret = -ENOSPC; | ||
1220 | goto out; | ||
1221 | } | ||
1222 | 2219 | ||
1223 | /* | 2220 | /* |
1224 | * If found_bitmap is true, we exhausted our search for extent entries, | 2221 | * We don't want bitmaps, so just move along until we find a normal |
1225 | * and we just want to search all of the bitmaps that we can find, and | 2222 | * extent entry. |
1226 | * ignore any extent entries we find. | ||
1227 | */ | 2223 | */ |
1228 | while (entry->bitmap || found_bitmap || | 2224 | while (entry->bitmap) { |
1229 | (!entry->bitmap && entry->bytes < min_bytes)) { | 2225 | if (list_empty(&entry->list)) |
1230 | struct rb_node *node = rb_next(&entry->offset_index); | 2226 | list_add_tail(&entry->list, bitmaps); |
1231 | 2227 | node = rb_next(&entry->offset_index); | |
1232 | if (entry->bitmap && entry->bytes > bytes + empty_size) { | 2228 | if (!node) |
1233 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, | 2229 | return -ENOSPC; |
1234 | offset, bytes + empty_size, | ||
1235 | min_bytes); | ||
1236 | if (!ret) | ||
1237 | goto got_it; | ||
1238 | } | ||
1239 | |||
1240 | if (!node) { | ||
1241 | ret = -ENOSPC; | ||
1242 | goto out; | ||
1243 | } | ||
1244 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 2230 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
1245 | } | 2231 | } |
1246 | 2232 | ||
1247 | /* | ||
1248 | * We already searched all the extent entries from the passed in offset | ||
1249 | * to the end and didn't find enough space for the cluster, and we also | ||
1250 | * didn't find any bitmaps that met our criteria, just go ahead and exit | ||
1251 | */ | ||
1252 | if (found_bitmap) { | ||
1253 | ret = -ENOSPC; | ||
1254 | goto out; | ||
1255 | } | ||
1256 | |||
1257 | cluster->points_to_bitmap = false; | ||
1258 | window_start = entry->offset; | 2233 | window_start = entry->offset; |
1259 | window_free = entry->bytes; | 2234 | window_free = entry->bytes; |
1260 | last = entry; | ||
1261 | max_extent = entry->bytes; | 2235 | max_extent = entry->bytes; |
2236 | first = entry; | ||
2237 | last = entry; | ||
2238 | prev = entry; | ||
1262 | 2239 | ||
1263 | while (1) { | 2240 | while (window_free <= min_bytes) { |
1264 | /* out window is just right, lets fill it */ | 2241 | node = rb_next(&entry->offset_index); |
1265 | if (window_free >= bytes + empty_size) | 2242 | if (!node) |
1266 | break; | 2243 | return -ENOSPC; |
1267 | 2244 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | |
1268 | node = rb_next(&last->offset_index); | ||
1269 | if (!node) { | ||
1270 | if (found_bitmap) | ||
1271 | goto again; | ||
1272 | ret = -ENOSPC; | ||
1273 | goto out; | ||
1274 | } | ||
1275 | next = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1276 | 2245 | ||
1277 | /* | 2246 | if (entry->bitmap) { |
1278 | * we found a bitmap, so if this search doesn't result in a | 2247 | if (list_empty(&entry->list)) |
1279 | * cluster, we know to go and search again for the bitmaps and | 2248 | list_add_tail(&entry->list, bitmaps); |
1280 | * start looking for space there | ||
1281 | */ | ||
1282 | if (next->bitmap) { | ||
1283 | if (!found_bitmap) | ||
1284 | offset = next->offset; | ||
1285 | found_bitmap = true; | ||
1286 | last = next; | ||
1287 | continue; | 2249 | continue; |
1288 | } | 2250 | } |
1289 | 2251 | ||
@@ -1291,60 +2253,190 @@ again: | |||
1291 | * we haven't filled the empty size and the window is | 2253 | * we haven't filled the empty size and the window is |
1292 | * very large. reset and try again | 2254 | * very large. reset and try again |
1293 | */ | 2255 | */ |
1294 | if (next->offset - (last->offset + last->bytes) > 128 * 1024 || | 2256 | if (entry->offset - (prev->offset + prev->bytes) > max_gap || |
1295 | next->offset - window_start > (bytes + empty_size) * 2) { | 2257 | entry->offset - window_start > (min_bytes * 2)) { |
1296 | entry = next; | 2258 | first = entry; |
1297 | window_start = entry->offset; | 2259 | window_start = entry->offset; |
1298 | window_free = entry->bytes; | 2260 | window_free = entry->bytes; |
1299 | last = entry; | 2261 | last = entry; |
1300 | max_extent = entry->bytes; | 2262 | max_extent = entry->bytes; |
1301 | } else { | 2263 | } else { |
1302 | last = next; | 2264 | last = entry; |
1303 | window_free += next->bytes; | 2265 | window_free += entry->bytes; |
1304 | if (entry->bytes > max_extent) | 2266 | if (entry->bytes > max_extent) |
1305 | max_extent = entry->bytes; | 2267 | max_extent = entry->bytes; |
1306 | } | 2268 | } |
2269 | prev = entry; | ||
1307 | } | 2270 | } |
1308 | 2271 | ||
1309 | cluster->window_start = entry->offset; | 2272 | cluster->window_start = first->offset; |
2273 | |||
2274 | node = &first->offset_index; | ||
1310 | 2275 | ||
1311 | /* | 2276 | /* |
1312 | * now we've found our entries, pull them out of the free space | 2277 | * now we've found our entries, pull them out of the free space |
1313 | * cache and put them into the cluster rbtree | 2278 | * cache and put them into the cluster rbtree |
1314 | * | ||
1315 | * The cluster includes an rbtree, but only uses the offset index | ||
1316 | * of each free space cache entry. | ||
1317 | */ | 2279 | */ |
1318 | while (1) { | 2280 | do { |
2281 | int ret; | ||
2282 | |||
2283 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1319 | node = rb_next(&entry->offset_index); | 2284 | node = rb_next(&entry->offset_index); |
1320 | if (entry->bitmap && node) { | 2285 | if (entry->bitmap) |
1321 | entry = rb_entry(node, struct btrfs_free_space, | ||
1322 | offset_index); | ||
1323 | continue; | 2286 | continue; |
1324 | } else if (entry->bitmap && !node) { | ||
1325 | break; | ||
1326 | } | ||
1327 | 2287 | ||
1328 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2288 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
1329 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2289 | ret = tree_insert_offset(&cluster->root, entry->offset, |
1330 | &entry->offset_index, 0); | 2290 | &entry->offset_index, 0); |
1331 | BUG_ON(ret); | 2291 | BUG_ON(ret); |
2292 | } while (node && entry != last); | ||
1332 | 2293 | ||
1333 | if (!node || entry == last) | 2294 | cluster->max_size = max_extent; |
1334 | break; | ||
1335 | 2295 | ||
2296 | return 0; | ||
2297 | } | ||
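
The loop above is a sliding window over the offset-sorted extent entries: free bytes accumulate until the window holds more than min_bytes, and the window restarts whenever the gap to the previous extent exceeds 128K or the window grows wider than twice min_bytes. A condensed stand-alone C sketch of the gap-driven restart, with the second restart condition left out for brevity:

    #include <stdio.h>
    #include <stdint.h>

    struct span { uint64_t offset, bytes; };

    int main(void)
    {
        struct span ext[] = {
            { 0,        4096  },
            { 8192,     4096  },
            { 1 << 20,  65536 },                    /* big gap before this */
            { (1 << 20) + 65536 + 4096, 65536 },    /* small gap, extends it */
        };
        uint64_t min_bytes = 100 * 1024;
        uint64_t max_gap = 128 * 1024;
        uint64_t window_start = ext[0].offset, window_free = 0;
        size_t i;

        for (i = 0; i < sizeof(ext) / sizeof(ext[0]); i++) {
            if (i) {
                uint64_t prev_end = ext[i - 1].offset + ext[i - 1].bytes;

                if (ext[i].offset - prev_end > max_gap) {
                    window_start = ext[i].offset;   /* restart the window */
                    window_free = 0;
                }
            }
            window_free += ext[i].bytes;
            if (window_free > min_bytes) {
                printf("cluster window at %llu with %llu free bytes\n",
                       (unsigned long long)window_start,
                       (unsigned long long)window_free);
                return 0;
            }
        }
        printf("no window found\n");
        return 0;
    }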
2298 | |||
2299 | /* | ||
2300 | * This specifically looks for bitmaps that may work in the cluster, we assume | ||
2301 | * that we have already failed to find extents that will work. | ||
2302 | */ | ||
2303 | static noinline int | ||
2304 | setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | ||
2305 | struct btrfs_free_cluster *cluster, | ||
2306 | struct list_head *bitmaps, u64 offset, u64 bytes, | ||
2307 | u64 min_bytes) | ||
2308 | { | ||
2309 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2310 | struct btrfs_free_space *entry; | ||
2311 | struct rb_node *node; | ||
2312 | int ret = -ENOSPC; | ||
2313 | |||
2314 | if (ctl->total_bitmaps == 0) | ||
2315 | return -ENOSPC; | ||
2316 | |||
2317 | /* | ||
2318 | * First check our cached list of bitmaps and see if there is an entry | ||
2319 | * here that will work. | ||
2320 | */ | ||
2321 | list_for_each_entry(entry, bitmaps, list) { | ||
2322 | if (entry->bytes < min_bytes) | ||
2323 | continue; | ||
2324 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | ||
2325 | bytes, min_bytes); | ||
2326 | if (!ret) | ||
2327 | return 0; | ||
2328 | } | ||
2329 | |||
2330 | /* | ||
2331 | * If we do have entries on our list and we are here then we didn't find | ||
2332 | * anything, so go ahead and get the next entry after the last entry in | ||
2333 | * this list and start the search from there. | ||
2334 | */ | ||
2335 | if (!list_empty(bitmaps)) { | ||
2336 | entry = list_entry(bitmaps->prev, struct btrfs_free_space, | ||
2337 | list); | ||
2338 | node = rb_next(&entry->offset_index); | ||
2339 | if (!node) | ||
2340 | return -ENOSPC; | ||
1336 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | 2341 | entry = rb_entry(node, struct btrfs_free_space, offset_index); |
2342 | goto search; | ||
1337 | } | 2343 | } |
1338 | 2344 | ||
1339 | cluster->max_size = max_extent; | 2345 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); |
1340 | got_it: | 2346 | if (!entry) |
1341 | ret = 0; | 2347 | return -ENOSPC; |
1342 | atomic_inc(&block_group->count); | 2348 | |
1343 | list_add_tail(&cluster->block_group_list, &block_group->cluster_list); | 2349 | search: |
1344 | cluster->block_group = block_group; | 2350 | node = &entry->offset_index; |
2351 | do { | ||
2352 | entry = rb_entry(node, struct btrfs_free_space, offset_index); | ||
2353 | node = rb_next(&entry->offset_index); | ||
2354 | if (!entry->bitmap) | ||
2355 | continue; | ||
2356 | if (entry->bytes < min_bytes) | ||
2357 | continue; | ||
2358 | ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, | ||
2359 | bytes, min_bytes); | ||
2360 | } while (ret && node); | ||
2361 | |||
2362 | return ret; | ||
2363 | } | ||
2364 | |||
2365 | /* | ||
2366 | * here we try to find a cluster of blocks in a block group. The goal | ||
2367 | * is to find at least bytes free and up to empty_size + bytes free. | ||
2368 | * We might not find them all in one contiguous area. | ||
2369 | * | ||
2370 | * returns zero and sets up cluster if things worked out, otherwise | ||
2371 | * it returns -enospc | ||
2372 | */ | ||
2373 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | ||
2374 | struct btrfs_root *root, | ||
2375 | struct btrfs_block_group_cache *block_group, | ||
2376 | struct btrfs_free_cluster *cluster, | ||
2377 | u64 offset, u64 bytes, u64 empty_size) | ||
2378 | { | ||
2379 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2380 | struct list_head bitmaps; | ||
2381 | struct btrfs_free_space *entry, *tmp; | ||
2382 | u64 min_bytes; | ||
2383 | int ret; | ||
2384 | |||
2385 | /* for metadata, allow allocates with more holes */ | ||
2386 | if (btrfs_test_opt(root, SSD_SPREAD)) { | ||
2387 | min_bytes = bytes + empty_size; | ||
2388 | } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) { | ||
2389 | /* | ||
2390 | * we want to do larger allocations when we are | ||
2391 | * flushing out the delayed refs, it helps prevent | ||
2392 | * making more work as we go along. | ||
2393 | */ | ||
2394 | if (trans->transaction->delayed_refs.flushing) | ||
2395 | min_bytes = max(bytes, (bytes + empty_size) >> 1); | ||
2396 | else | ||
2397 | min_bytes = max(bytes, (bytes + empty_size) >> 4); | ||
2398 | } else | ||
2399 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | ||
2400 | |||
2401 | spin_lock(&ctl->tree_lock); | ||
2402 | |||
2403 | /* | ||
2404 | * If we know we don't have enough space to make a cluster don't even | ||
2405 | * bother doing all the work to try and find one. | ||
2406 | */ | ||
2407 | if (ctl->free_space < min_bytes) { | ||
2408 | spin_unlock(&ctl->tree_lock); | ||
2409 | return -ENOSPC; | ||
2410 | } | ||
2411 | |||
2412 | spin_lock(&cluster->lock); | ||
2413 | |||
2414 | /* someone already found a cluster, hooray */ | ||
2415 | if (cluster->block_group) { | ||
2416 | ret = 0; | ||
2417 | goto out; | ||
2418 | } | ||
2419 | |||
2420 | INIT_LIST_HEAD(&bitmaps); | ||
2421 | ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, | ||
2422 | bytes, min_bytes); | ||
2423 | if (ret) | ||
2424 | ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, | ||
2425 | offset, bytes, min_bytes); | ||
2426 | |||
2427 | /* Clear our temporary list */ | ||
2428 | list_for_each_entry_safe(entry, tmp, &bitmaps, list) | ||
2429 | list_del_init(&entry->list); | ||
2430 | |||
2431 | if (!ret) { | ||
2432 | atomic_inc(&block_group->count); | ||
2433 | list_add_tail(&cluster->block_group_list, | ||
2434 | &block_group->cluster_list); | ||
2435 | cluster->block_group = block_group; | ||
2436 | } | ||
1345 | out: | 2437 | out: |
1346 | spin_unlock(&cluster->lock); | 2438 | spin_unlock(&cluster->lock); |
1347 | spin_unlock(&block_group->tree_lock); | 2439 | spin_unlock(&ctl->tree_lock); |
1348 | 2440 | ||
1349 | return ret; | 2441 | return ret; |
1350 | } | 2442 | } |
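
The min_bytes heuristic at the top translates to: ssd_spread mounts insist on the whole bytes + empty_size, metadata wants half of it while delayed refs are flushing and a sixteenth otherwise, and data allocations want a quarter, all clamped to at least bytes. A worked example with assumed inputs (bytes = 256K, empty_size = 768K):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint64_t bytes = 256 * 1024, empty_size = 768 * 1024;
        uint64_t total = bytes + empty_size;               /* 1MiB */

        printf("ssd_spread:        %llu\n", (unsigned long long)total);
        printf("metadata/flushing: %llu\n",
               (unsigned long long)max64(bytes, total >> 1));   /* 512k */
        printf("metadata:          %llu\n",
               (unsigned long long)max64(bytes, total >> 4));   /* 256k */
        printf("data:              %llu\n",
               (unsigned long long)max64(bytes, total >> 2));   /* 256k */
        return 0;
    }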
@@ -1358,8 +2450,244 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
1358 | spin_lock_init(&cluster->refill_lock); | 2450 | spin_lock_init(&cluster->refill_lock); |
1359 | cluster->root = RB_ROOT; | 2451 | cluster->root = RB_ROOT; |
1360 | cluster->max_size = 0; | 2452 | cluster->max_size = 0; |
1361 | cluster->points_to_bitmap = false; | ||
1362 | INIT_LIST_HEAD(&cluster->block_group_list); | 2453 | INIT_LIST_HEAD(&cluster->block_group_list); |
1363 | cluster->block_group = NULL; | 2454 | cluster->block_group = NULL; |
1364 | } | 2455 | } |
1365 | 2456 | ||
2457 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | ||
2458 | u64 *trimmed, u64 start, u64 end, u64 minlen) | ||
2459 | { | ||
2460 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2461 | struct btrfs_free_space *entry = NULL; | ||
2462 | struct btrfs_fs_info *fs_info = block_group->fs_info; | ||
2463 | u64 bytes = 0; | ||
2464 | u64 actually_trimmed; | ||
2465 | int ret = 0; | ||
2466 | |||
2467 | *trimmed = 0; | ||
2468 | |||
2469 | while (start < end) { | ||
2470 | spin_lock(&ctl->tree_lock); | ||
2471 | |||
2472 | if (ctl->free_space < minlen) { | ||
2473 | spin_unlock(&ctl->tree_lock); | ||
2474 | break; | ||
2475 | } | ||
2476 | |||
2477 | entry = tree_search_offset(ctl, start, 0, 1); | ||
2478 | if (!entry) | ||
2479 | entry = tree_search_offset(ctl, | ||
2480 | offset_to_bitmap(ctl, start), | ||
2481 | 1, 1); | ||
2482 | |||
2483 | if (!entry || entry->offset >= end) { | ||
2484 | spin_unlock(&ctl->tree_lock); | ||
2485 | break; | ||
2486 | } | ||
2487 | |||
2488 | if (entry->bitmap) { | ||
2489 | ret = search_bitmap(ctl, entry, &start, &bytes); | ||
2490 | if (!ret) { | ||
2491 | if (start >= end) { | ||
2492 | spin_unlock(&ctl->tree_lock); | ||
2493 | break; | ||
2494 | } | ||
2495 | bytes = min(bytes, end - start); | ||
2496 | bitmap_clear_bits(ctl, entry, start, bytes); | ||
2497 | if (entry->bytes == 0) | ||
2498 | free_bitmap(ctl, entry); | ||
2499 | } else { | ||
2500 | start = entry->offset + BITS_PER_BITMAP * | ||
2501 | block_group->sectorsize; | ||
2502 | spin_unlock(&ctl->tree_lock); | ||
2503 | ret = 0; | ||
2504 | continue; | ||
2505 | } | ||
2506 | } else { | ||
2507 | start = entry->offset; | ||
2508 | bytes = min(entry->bytes, end - start); | ||
2509 | unlink_free_space(ctl, entry); | ||
2510 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2511 | } | ||
2512 | |||
2513 | spin_unlock(&ctl->tree_lock); | ||
2514 | |||
2515 | if (bytes >= minlen) { | ||
2516 | int update_ret; | ||
2517 | update_ret = btrfs_update_reserved_bytes(block_group, | ||
2518 | bytes, 1, 1); | ||
2519 | |||
2520 | ret = btrfs_error_discard_extent(fs_info->extent_root, | ||
2521 | start, | ||
2522 | bytes, | ||
2523 | &actually_trimmed); | ||
2524 | |||
2525 | btrfs_add_free_space(block_group, start, bytes); | ||
2526 | if (!update_ret) | ||
2527 | btrfs_update_reserved_bytes(block_group, | ||
2528 | bytes, 0, 1); | ||
2529 | |||
2530 | if (ret) | ||
2531 | break; | ||
2532 | *trimmed += actually_trimmed; | ||
2533 | } | ||
2534 | start += bytes; | ||
2535 | bytes = 0; | ||
2536 | |||
2537 | if (fatal_signal_pending(current)) { | ||
2538 | ret = -ERESTARTSYS; | ||
2539 | break; | ||
2540 | } | ||
2541 | |||
2542 | cond_resched(); | ||
2543 | } | ||
2544 | |||
2545 | return ret; | ||
2546 | } | ||
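
The trim loop walks the free ranges that fall inside [start, end), skips anything smaller than minlen, and discards each surviving range before giving the space back to the block group. A skeleton of that walk as a stand-alone C program, with an array standing in for the free-space tree and a printf standing in for the discard:

    #include <stdio.h>
    #include <stdint.h>

    struct span { uint64_t offset, bytes; };

    int main(void)
    {
        struct span ranges[] = {
            { 0,       16384 },
            { 65536,   2048  },    /* smaller than minlen, skipped */
            { 131072,  32768 },
        };
        uint64_t start = 0, end = 256 * 1024, minlen = 4096;
        uint64_t trimmed = 0;
        size_t i;

        for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
            uint64_t off = ranges[i].offset;
            uint64_t len = ranges[i].bytes;

            if (off >= end)
                break;
            if (off < start || len < minlen)
                continue;
            if (off + len > end)
                len = end - off;    /* clamp to the requested window */

            printf("discard %llu bytes at %llu\n",
                   (unsigned long long)len, (unsigned long long)off);
            trimmed += len;
        }
        printf("trimmed %llu bytes total\n", (unsigned long long)trimmed);
        return 0;
    }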
2547 | |||
2548 | /* | ||
2549 | * Find the left-most item in the cache tree, and then return the | ||
2550 | * smallest inode number in the item. | ||
2551 | * | ||
2552 | * Note: the returned inode number may not be the smallest one in | ||
2553 | * the tree, if the left-most item is a bitmap. | ||
2554 | */ | ||
2555 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) | ||
2556 | { | ||
2557 | struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; | ||
2558 | struct btrfs_free_space *entry = NULL; | ||
2559 | u64 ino = 0; | ||
2560 | |||
2561 | spin_lock(&ctl->tree_lock); | ||
2562 | |||
2563 | if (RB_EMPTY_ROOT(&ctl->free_space_offset)) | ||
2564 | goto out; | ||
2565 | |||
2566 | entry = rb_entry(rb_first(&ctl->free_space_offset), | ||
2567 | struct btrfs_free_space, offset_index); | ||
2568 | |||
2569 | if (!entry->bitmap) { | ||
2570 | ino = entry->offset; | ||
2571 | |||
2572 | unlink_free_space(ctl, entry); | ||
2573 | entry->offset++; | ||
2574 | entry->bytes--; | ||
2575 | if (!entry->bytes) | ||
2576 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2577 | else | ||
2578 | link_free_space(ctl, entry); | ||
2579 | } else { | ||
2580 | u64 offset = 0; | ||
2581 | u64 count = 1; | ||
2582 | int ret; | ||
2583 | |||
2584 | ret = search_bitmap(ctl, entry, &offset, &count); | ||
2585 | BUG_ON(ret); | ||
2586 | |||
2587 | ino = offset; | ||
2588 | bitmap_clear_bits(ctl, entry, offset, 1); | ||
2589 | if (entry->bytes == 0) | ||
2590 | free_bitmap(ctl, entry); | ||
2591 | } | ||
2592 | out: | ||
2593 | spin_unlock(&ctl->tree_lock); | ||
2594 | |||
2595 | return ino; | ||
2596 | } | ||
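
For the free-ino ctl the unit is a single inode number, so a plain extent entry is just a run of consecutive free inos and handing one out means advancing the run by one, exactly as the non-bitmap branch above does. A miniature stand-alone version of that case:

    #include <stdio.h>
    #include <stdint.h>

    struct ino_run { uint64_t first, count; };

    static uint64_t alloc_ino(struct ino_run *run)
    {
        uint64_t ino;

        if (!run->count)
            return 0;               /* nothing cached */

        ino = run->first;
        run->first++;               /* shrink the run from the front */
        run->count--;
        return ino;
    }

    int main(void)
    {
        struct ino_run run = { 257, 10 };   /* inos 257..266 are free */
        uint64_t ino = alloc_ino(&run);

        printf("got ino %llu, next would be %llu (%llu left)\n",
               (unsigned long long)ino,
               (unsigned long long)run.first,
               (unsigned long long)run.count);
        return 0;
    }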
2597 | |||
2598 | struct inode *lookup_free_ino_inode(struct btrfs_root *root, | ||
2599 | struct btrfs_path *path) | ||
2600 | { | ||
2601 | struct inode *inode = NULL; | ||
2602 | |||
2603 | spin_lock(&root->cache_lock); | ||
2604 | if (root->cache_inode) | ||
2605 | inode = igrab(root->cache_inode); | ||
2606 | spin_unlock(&root->cache_lock); | ||
2607 | if (inode) | ||
2608 | return inode; | ||
2609 | |||
2610 | inode = __lookup_free_space_inode(root, path, 0); | ||
2611 | if (IS_ERR(inode)) | ||
2612 | return inode; | ||
2613 | |||
2614 | spin_lock(&root->cache_lock); | ||
2615 | if (!btrfs_fs_closing(root->fs_info)) | ||
2616 | root->cache_inode = igrab(inode); | ||
2617 | spin_unlock(&root->cache_lock); | ||
2618 | |||
2619 | return inode; | ||
2620 | } | ||
2621 | |||
2622 | int create_free_ino_inode(struct btrfs_root *root, | ||
2623 | struct btrfs_trans_handle *trans, | ||
2624 | struct btrfs_path *path) | ||
2625 | { | ||
2626 | return __create_free_space_inode(root, trans, path, | ||
2627 | BTRFS_FREE_INO_OBJECTID, 0); | ||
2628 | } | ||
2629 | |||
2630 | int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | ||
2631 | { | ||
2632 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
2633 | struct btrfs_path *path; | ||
2634 | struct inode *inode; | ||
2635 | int ret = 0; | ||
2636 | u64 root_gen = btrfs_root_generation(&root->root_item); | ||
2637 | |||
2638 | if (!btrfs_test_opt(root, INODE_MAP_CACHE)) | ||
2639 | return 0; | ||
2640 | |||
2641 | /* | ||
2642 | * If we're unmounting then just return, since this does a search on the | ||
2643 | * normal root and not the commit root and we could deadlock. | ||
2644 | */ | ||
2645 | if (btrfs_fs_closing(fs_info)) | ||
2646 | return 0; | ||
2647 | |||
2648 | path = btrfs_alloc_path(); | ||
2649 | if (!path) | ||
2650 | return 0; | ||
2651 | |||
2652 | inode = lookup_free_ino_inode(root, path); | ||
2653 | if (IS_ERR(inode)) | ||
2654 | goto out; | ||
2655 | |||
2656 | if (root_gen != BTRFS_I(inode)->generation) | ||
2657 | goto out_put; | ||
2658 | |||
2659 | ret = __load_free_space_cache(root, inode, ctl, path, 0); | ||
2660 | |||
2661 | if (ret < 0) | ||
2662 | printk(KERN_ERR "btrfs: failed to load free ino cache for " | ||
2663 | "root %llu\n", root->root_key.objectid); | ||
2664 | out_put: | ||
2665 | iput(inode); | ||
2666 | out: | ||
2667 | btrfs_free_path(path); | ||
2668 | return ret; | ||
2669 | } | ||
2670 | |||
2671 | int btrfs_write_out_ino_cache(struct btrfs_root *root, | ||
2672 | struct btrfs_trans_handle *trans, | ||
2673 | struct btrfs_path *path) | ||
2674 | { | ||
2675 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
2676 | struct inode *inode; | ||
2677 | int ret; | ||
2678 | |||
2679 | if (!btrfs_test_opt(root, INODE_MAP_CACHE)) | ||
2680 | return 0; | ||
2681 | |||
2682 | inode = lookup_free_ino_inode(root, path); | ||
2683 | if (IS_ERR(inode)) | ||
2684 | return 0; | ||
2685 | |||
2686 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); | ||
2687 | if (ret < 0) | ||
2688 | printk(KERN_ERR "btrfs: failed to write free ino cache " | ||
2689 | "for root %llu\n", root->root_key.objectid); | ||
2690 | |||
2691 | iput(inode); | ||
2692 | return ret; | ||
2693 | } | ||