Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--   fs/btrfs/free-space-cache.c   530
1 file changed, 375 insertions(+), 155 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d1e5f0e84c58..768b9523662d 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -18,6 +18,15 @@
 
 #include <linux/sched.h>
 #include "ctree.h"
+#include "free-space-cache.h"
+#include "transaction.h"
+
+struct btrfs_free_space {
+	struct rb_node bytes_index;
+	struct rb_node offset_index;
+	u64 offset;
+	u64 bytes;
+};
 
 static int tree_insert_offset(struct rb_root *root, u64 offset,
 			      struct rb_node *node)
@@ -68,14 +77,24 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
 }
 
 /*
- * searches the tree for the given offset. If contains is set we will return
- * the free space that contains the given offset. If contains is not set we
- * will return the free space that starts at or after the given offset and is
- * at least bytes long.
+ * searches the tree for the given offset.
+ *
+ * fuzzy == 1: this is used for allocations where we are given a hint of where
+ * to look for free space. Because the hint may not be completely on an offset
+ * mark, or the hint may no longer point to free space we need to fudge our
+ * results a bit. So we look for free space starting at or after offset with at
+ * least bytes size. We prefer to find as close to the given offset as we can.
+ * Also if the offset is within a free space range, then we will return the free
+ * space that contains the given offset, which means we can return a free space
+ * chunk with an offset before the provided offset.
+ *
+ * fuzzy == 0: this is just a normal tree search. Give us the free space that
+ * starts at the given offset which is at least bytes size, and if its not there
+ * return NULL.
  */
 static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
 						   u64 offset, u64 bytes,
-						   int contains)
+						   int fuzzy)
 {
 	struct rb_node *n = root->rb_node;
 	struct btrfs_free_space *entry, *ret = NULL;
@@ -84,13 +103,14 @@ static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
 		entry = rb_entry(n, struct btrfs_free_space, offset_index);
 
 		if (offset < entry->offset) {
-			if (!contains &&
+			if (fuzzy &&
 			    (!ret || entry->offset < ret->offset) &&
 			    (bytes <= entry->bytes))
 				ret = entry;
 			n = n->rb_left;
 		} else if (offset > entry->offset) {
-			if ((entry->offset + entry->bytes - 1) >= offset &&
+			if (fuzzy &&
+			    (entry->offset + entry->bytes - 1) >= offset &&
 			    bytes <= entry->bytes) {
 				ret = entry;
 				break;
@@ -171,6 +191,7 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	int ret = 0;
 
 
+	BUG_ON(!info->bytes);
 	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
 				 &info->offset_index);
 	if (ret)
@@ -184,108 +205,70 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			 u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *right_info;
 	struct btrfs_free_space *left_info;
 	struct btrfs_free_space *info = NULL;
-	struct btrfs_free_space *alloc_info;
 	int ret = 0;
 
-	alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!alloc_info)
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
 		return -ENOMEM;
 
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
 	/*
 	 * first we want to see if there is free space adjacent to the range we
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
 	right_info = tree_search_offset(&block_group->free_space_offset,
-					offset+bytes, 0, 1);
+					offset+bytes, 0, 0);
 	left_info = tree_search_offset(&block_group->free_space_offset,
 				       offset-1, 0, 1);
 
-	if (right_info && right_info->offset == offset+bytes) {
+	if (right_info) {
 		unlink_free_space(block_group, right_info);
-		info = right_info;
-		info->offset = offset;
-		info->bytes += bytes;
-	} else if (right_info && right_info->offset != offset+bytes) {
-		printk(KERN_ERR "btrfs adding space in the middle of an "
-		       "existing free space area. existing: "
-		       "offset=%llu, bytes=%llu. new: offset=%llu, "
-		       "bytes=%llu\n", (unsigned long long)right_info->offset,
-		       (unsigned long long)right_info->bytes,
-		       (unsigned long long)offset,
-		       (unsigned long long)bytes);
-		BUG();
+		info->bytes += right_info->bytes;
+		kfree(right_info);
 	}
 
-	if (left_info) {
+	if (left_info && left_info->offset + left_info->bytes == offset) {
 		unlink_free_space(block_group, left_info);
-
-		if (unlikely((left_info->offset + left_info->bytes) !=
-			     offset)) {
-			printk(KERN_ERR "btrfs free space to the left "
-			       "of new free space isn't "
-			       "quite right. existing: offset=%llu, "
-			       "bytes=%llu. new: offset=%llu, bytes=%llu\n",
-			       (unsigned long long)left_info->offset,
-			       (unsigned long long)left_info->bytes,
-			       (unsigned long long)offset,
-			       (unsigned long long)bytes);
-			BUG();
-		}
-
-		if (info) {
-			info->offset = left_info->offset;
-			info->bytes += left_info->bytes;
-			kfree(left_info);
-		} else {
-			info = left_info;
-			info->bytes += bytes;
-		}
+		info->offset = left_info->offset;
+		info->bytes += left_info->bytes;
+		kfree(left_info);
 	}
 
-	if (info) {
-		ret = link_free_space(block_group, info);
-		if (!ret)
-			info = NULL;
-		goto out;
-	}
-
-	info = alloc_info;
-	alloc_info = NULL;
-	info->offset = offset;
-	info->bytes = bytes;
-
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
-out:
+
+	spin_unlock(&block_group->tree_lock);
+
 	if (ret) {
 		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
-		if (ret == -EEXIST)
-			BUG();
+		BUG_ON(ret == -EEXIST);
 	}
 
-	kfree(alloc_info);
-
 	return ret;
 }
 
-static int
-__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			  u64 offset, u64 bytes)
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
 	int ret = 0;
 
+	spin_lock(&block_group->tree_lock);
+
 	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
 				  1);
-
 	if (info && info->offset == offset) {
 		if (info->bytes < bytes) {
 			printk(KERN_ERR "Found free space at %llu, size %llu,"
@@ -295,12 +278,14 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			       (unsigned long long)bytes);
 			WARN_ON(1);
 			ret = -EINVAL;
+			spin_unlock(&block_group->tree_lock);
 			goto out;
 		}
 		unlink_free_space(block_group, info);
 
 		if (info->bytes == bytes) {
 			kfree(info);
+			spin_unlock(&block_group->tree_lock);
 			goto out;
 		}
 
@@ -308,6 +293,7 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		info->bytes -= bytes;
 
 		ret = link_free_space(block_group, info);
+		spin_unlock(&block_group->tree_lock);
 		BUG_ON(ret);
 	} else if (info && info->offset < offset &&
 		   info->offset + info->bytes >= offset + bytes) {
@@ -333,70 +319,33 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			 */
 			kfree(info);
 		}
-
+		spin_unlock(&block_group->tree_lock);
 		/* step two, insert a new info struct to cover anything
 		 * before the hole
 		 */
-		ret = __btrfs_add_free_space(block_group, old_start,
+		ret = btrfs_add_free_space(block_group, old_start,
 					   offset - old_start);
 		BUG_ON(ret);
 	} else {
+		spin_unlock(&block_group->tree_lock);
+		if (!info) {
+			printk(KERN_ERR "couldn't find space %llu to free\n",
+			       (unsigned long long)offset);
+			printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
+			       block_group->cached, block_group->key.objectid,
+			       block_group->key.offset);
+			btrfs_dump_free_space(block_group, bytes);
+		} else if (info) {
+			printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
+			       "but wanted offset=%llu bytes=%llu\n",
+			       info->offset, info->bytes, offset, bytes);
+		}
 		WARN_ON(1);
 	}
 out:
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
-{
-	int ret;
-	struct btrfs_free_space *sp;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = __btrfs_add_free_space(block_group, offset, bytes);
-	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
-	BUG_ON(!sp);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-
-int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
-			      u64 offset, u64 bytes)
-{
-	int ret;
-	struct btrfs_free_space *sp;
-
-	ret = __btrfs_add_free_space(block_group, offset, bytes);
-	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
-	BUG_ON(!sp);
-
-	return ret;
-}
-
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			    u64 offset, u64 bytes)
-{
-	int ret = 0;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = __btrfs_remove_free_space(block_group, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-
-int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
-				 u64 offset, u64 bytes)
-{
-	int ret;
-
-	ret = __btrfs_remove_free_space(block_group, offset, bytes);
-
-	return ret;
-}
-
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 			   u64 bytes)
 {
@@ -408,6 +357,8 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
 		if (info->bytes >= bytes)
 			count++;
+		printk(KERN_ERR "entry offset %llu, bytes %llu\n", info->offset,
+		       info->bytes);
 	}
 	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
 	       "\n", count);
@@ -428,68 +379,337 @@ u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
 	return ret;
 }
 
+/*
+ * for a given cluster, put all of its extents back into the free
+ * space cache. If the block group passed doesn't match the block group
+ * pointed to by the cluster, someone else raced in and freed the
+ * cluster already. In that case, we just return without changing anything
+ */
+static int
+__btrfs_return_cluster_to_free_space(
+			     struct btrfs_block_group_cache *block_group,
+			     struct btrfs_free_cluster *cluster)
+{
+	struct btrfs_free_space *entry;
+	struct rb_node *node;
+
+	spin_lock(&cluster->lock);
+	if (cluster->block_group != block_group)
+		goto out;
+
+	cluster->window_start = 0;
+	node = rb_first(&cluster->root);
+	while(node) {
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		node = rb_next(&entry->offset_index);
+		rb_erase(&entry->offset_index, &cluster->root);
+		link_free_space(block_group, entry);
+	}
+	list_del_init(&cluster->block_group_list);
+
+	btrfs_put_block_group(cluster->block_group);
+	cluster->block_group = NULL;
+	cluster->root.rb_node = NULL;
+out:
+	spin_unlock(&cluster->lock);
+	return 0;
+}
+
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 {
 	struct btrfs_free_space *info;
 	struct rb_node *node;
+	struct btrfs_free_cluster *cluster;
+	struct btrfs_free_cluster *safe;
+
+	spin_lock(&block_group->tree_lock);
+
+	list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
+				 block_group_list) {
+
+		WARN_ON(cluster->block_group != block_group);
+		__btrfs_return_cluster_to_free_space(block_group, cluster);
+	}
 
-	mutex_lock(&block_group->alloc_mutex);
 	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
 		info = rb_entry(node, struct btrfs_free_space, bytes_index);
 		unlink_free_space(block_group, info);
 		kfree(info);
 		if (need_resched()) {
-			mutex_unlock(&block_group->alloc_mutex);
+			spin_unlock(&block_group->tree_lock);
 			cond_resched();
-			mutex_lock(&block_group->alloc_mutex);
+			spin_lock(&block_group->tree_lock);
 		}
 	}
-	mutex_unlock(&block_group->alloc_mutex);
+	spin_unlock(&block_group->tree_lock);
 }
 
-#if 0
-static struct btrfs_free_space *btrfs_find_free_space_offset(struct
-						btrfs_block_group_cache
-						*block_group, u64 offset,
-						u64 bytes)
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+			       u64 offset, u64 bytes, u64 empty_size)
 {
-	struct btrfs_free_space *ret;
+	struct btrfs_free_space *entry = NULL;
+	u64 ret = 0;
 
-	mutex_lock(&block_group->alloc_mutex);
-	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 0);
-	mutex_unlock(&block_group->alloc_mutex);
+	spin_lock(&block_group->tree_lock);
+	entry = tree_search_offset(&block_group->free_space_offset, offset,
+				   bytes + empty_size, 1);
+	if (!entry)
+		entry = tree_search_bytes(&block_group->free_space_bytes,
+					  offset, bytes + empty_size);
+	if (entry) {
+		unlink_free_space(block_group, entry);
+		ret = entry->offset;
+		entry->offset += bytes;
+		entry->bytes -= bytes;
+
+		if (!entry->bytes)
+			kfree(entry);
+		else
+			link_free_space(block_group, entry);
+	}
+	spin_unlock(&block_group->tree_lock);
 
 	return ret;
 }
 
-static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
-						btrfs_block_group_cache
-						*block_group, u64 offset,
-						u64 bytes)
+/*
+ * given a cluster, put all of its extents back into the free space
+ * cache. If a block group is passed, this function will only free
+ * a cluster that belongs to the passed block group.
+ *
+ * Otherwise, it'll get a reference on the block group pointed to by the
+ * cluster and remove the cluster from it.
+ */
+int btrfs_return_cluster_to_free_space(
+			       struct btrfs_block_group_cache *block_group,
+			       struct btrfs_free_cluster *cluster)
 {
-	struct btrfs_free_space *ret;
+	int ret;
 
-	mutex_lock(&block_group->alloc_mutex);
+	/* first, get a safe pointer to the block group */
+	spin_lock(&cluster->lock);
+	if (!block_group) {
+		block_group = cluster->block_group;
+		if (!block_group) {
+			spin_unlock(&cluster->lock);
+			return 0;
+		}
+	} else if (cluster->block_group != block_group) {
+		/* someone else has already freed it don't redo their work */
+		spin_unlock(&cluster->lock);
+		return 0;
+	}
+	atomic_inc(&block_group->count);
+	spin_unlock(&cluster->lock);
 
-	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
+	/* now return any extents the cluster had on it */
+	spin_lock(&block_group->tree_lock);
+	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
+	spin_unlock(&block_group->tree_lock);
 
+	/* finally drop our ref */
+	btrfs_put_block_group(block_group);
 	return ret;
 }
-#endif
 
-struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
-					       *block_group, u64 offset,
-					       u64 bytes)
+/*
+ * given a cluster, try to allocate 'bytes' from it, returns 0
+ * if it couldn't find anything suitably large, or a logical disk offset
+ * if things worked out
+ */
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+			     struct btrfs_free_cluster *cluster, u64 bytes,
+			     u64 min_start)
+{
+	struct btrfs_free_space *entry = NULL;
+	struct rb_node *node;
+	u64 ret = 0;
+
+	spin_lock(&cluster->lock);
+	if (bytes > cluster->max_size)
+		goto out;
+
+	if (cluster->block_group != block_group)
+		goto out;
+
+	node = rb_first(&cluster->root);
+	if (!node)
+		goto out;
+
+	entry = rb_entry(node, struct btrfs_free_space, offset_index);
+
+	while(1) {
+		if (entry->bytes < bytes || entry->offset < min_start) {
+			struct rb_node *node;
+
+			node = rb_next(&entry->offset_index);
+			if (!node)
+				break;
+			entry = rb_entry(node, struct btrfs_free_space,
+					 offset_index);
+			continue;
+		}
+		ret = entry->offset;
+
+		entry->offset += bytes;
+		entry->bytes -= bytes;
+
+		if (entry->bytes == 0) {
+			rb_erase(&entry->offset_index, &cluster->root);
+			kfree(entry);
+		}
+		break;
+	}
+out:
+	spin_unlock(&cluster->lock);
+	return ret;
+}
+
+/*
+ * here we try to find a cluster of blocks in a block group. The goal
+ * is to find at least bytes free and up to empty_size + bytes free.
+ * We might not find them all in one contiguous area.
+ *
+ * returns zero and sets up cluster if things worked out, otherwise
+ * it returns -enospc
+ */
+int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
+			     struct btrfs_block_group_cache *block_group,
+			     struct btrfs_free_cluster *cluster,
+			     u64 offset, u64 bytes, u64 empty_size)
 {
-	struct btrfs_free_space *ret = NULL;
+	struct btrfs_free_space *entry = NULL;
+	struct rb_node *node;
+	struct btrfs_free_space *next;
+	struct btrfs_free_space *last;
+	u64 min_bytes;
+	u64 window_start;
+	u64 window_free;
+	u64 max_extent = 0;
+	int total_retries = 0;
+	int ret;
+
+	/* for metadata, allow allocates with more holes */
+	if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
+		/*
+		 * we want to do larger allocations when we are
+		 * flushing out the delayed refs, it helps prevent
+		 * making more work as we go along.
+		 */
+		if (trans->transaction->delayed_refs.flushing)
+			min_bytes = max(bytes, (bytes + empty_size) >> 1);
+		else
+			min_bytes = max(bytes, (bytes + empty_size) >> 4);
+	} else
+		min_bytes = max(bytes, (bytes + empty_size) >> 2);
+
+	spin_lock(&block_group->tree_lock);
+	spin_lock(&cluster->lock);
+
+	/* someone already found a cluster, hooray */
+	if (cluster->block_group) {
+		ret = 0;
+		goto out;
+	}
+again:
+	min_bytes = min(min_bytes, bytes + empty_size);
+	entry = tree_search_bytes(&block_group->free_space_bytes,
+				  offset, min_bytes);
+	if (!entry) {
+		ret = -ENOSPC;
+		goto out;
+	}
+	window_start = entry->offset;
+	window_free = entry->bytes;
+	last = entry;
+	max_extent = entry->bytes;
+
+	while(1) {
+		/* out window is just right, lets fill it */
+		if (window_free >= bytes + empty_size)
+			break;
 
-	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 0);
-	if (!ret)
-		ret = tree_search_bytes(&block_group->free_space_bytes,
-					offset, bytes);
+		node = rb_next(&last->offset_index);
+		if (!node) {
+			ret = -ENOSPC;
+			goto out;
+		}
+		next = rb_entry(node, struct btrfs_free_space, offset_index);
+
+		/*
+		 * we haven't filled the empty size and the window is
+		 * very large. reset and try again
+		 */
+		if (next->offset - window_start > (bytes + empty_size) * 2) {
+			entry = next;
+			window_start = entry->offset;
+			window_free = entry->bytes;
+			last = entry;
+			max_extent = 0;
+			total_retries++;
+			if (total_retries % 256 == 0) {
+				if (min_bytes >= (bytes + empty_size)) {
+					ret = -ENOSPC;
+					goto out;
+				}
+				/*
+				 * grow our allocation a bit, we're not having
+				 * much luck
+				 */
+				min_bytes *= 2;
+				goto again;
+			}
+		} else {
+			last = next;
+			window_free += next->bytes;
+			if (entry->bytes > max_extent)
+				max_extent = entry->bytes;
+		}
+	}
+
+	cluster->window_start = entry->offset;
+
+	/*
+	 * now we've found our entries, pull them out of the free space
+	 * cache and put them into the cluster rbtree
+	 *
+	 * The cluster includes an rbtree, but only uses the offset index
+	 * of each free space cache entry.
+	 */
+	while(1) {
+		node = rb_next(&entry->offset_index);
+		unlink_free_space(block_group, entry);
+		ret = tree_insert_offset(&cluster->root, entry->offset,
+					 &entry->offset_index);
+		BUG_ON(ret);
+
+		if (!node || entry == last)
+			break;
+
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+	}
+	ret = 0;
+	cluster->max_size = max_extent;
+	atomic_inc(&block_group->count);
+	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
+	cluster->block_group = block_group;
+out:
+	spin_unlock(&cluster->lock);
+	spin_unlock(&block_group->tree_lock);
 
 	return ret;
 }
+
+/*
+ * simple code to zero out a cluster
+ */
+void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
+{
+	spin_lock_init(&cluster->lock);
+	spin_lock_init(&cluster->refill_lock);
+	cluster->root.rb_node = NULL;
+	cluster->max_size = 0;
+	INIT_LIST_HEAD(&cluster->block_group_list);
+	cluster->block_group = NULL;
+}
+
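
As review context, a minimal sketch of how a caller is expected to drive the cluster API this patch introduces. The sketch is hypothetical (cluster_alloc_sketch and its parameters are illustrative, not part of the patch); the real consumer is the allocator in fs/btrfs/extent-tree.c. It assumes the cluster was zeroed once via btrfs_init_free_cluster() at setup and that the caller already holds a reference on the block group.

/*
 * Hypothetical caller, not part of this patch: carve a window out of the
 * free space cache, satisfy allocations from it, and give any leftovers
 * back when the window runs dry.
 */
static u64 cluster_alloc_sketch(struct btrfs_trans_handle *trans,
				struct btrfs_block_group_cache *block_group,
				struct btrfs_free_cluster *cluster,
				u64 search_start, u64 num_bytes,
				u64 empty_size)
{
	u64 start;

	/* find at least num_bytes, ideally num_bytes + empty_size */
	if (btrfs_find_space_cluster(trans, block_group, cluster,
				     search_start, num_bytes, empty_size))
		return 0;	/* -ENOSPC: fall back to a plain search */

	/* returns 0 or a logical disk offset, per the function's comment */
	start = btrfs_alloc_from_cluster(block_group, cluster, num_bytes,
					 search_start);
	if (start == 0) {
		/* window exhausted: return leftover extents to the cache */
		btrfs_return_cluster_to_free_space(block_group, cluster);
	}
	return start;
}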