Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--  fs/btrfs/free-space-cache.c  183
1 file changed, 57 insertions(+), 126 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 69b023ff6f72..df19b60eef61 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -182,6 +182,7 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	int ret = 0;
 
 
+	BUG_ON(!info->bytes);
 	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index);
 	if (ret)
@@ -195,14 +196,23 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *right_info;
 	struct btrfs_free_space *left_info;
 	struct btrfs_free_space *info = NULL;
 	int ret = 0;
 
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
 	/*
 	 * first we want to see if there is free space adjacent to the range we
 	 * are adding, if there is remove that struct and add a new one to
@@ -215,42 +225,23 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 
 	if (right_info) {
 		unlink_free_space(block_group, right_info);
-		info = right_info;
-		info->offset = offset;
-		info->bytes += bytes;
+		info->bytes += right_info->bytes;
+		kfree(right_info);
 	}
 
 	if (left_info && left_info->offset + left_info->bytes == offset) {
 		unlink_free_space(block_group, left_info);
-
-		if (info) {
-			info->offset = left_info->offset;
-			info->bytes += left_info->bytes;
-			kfree(left_info);
-		} else {
-			info = left_info;
-			info->bytes += bytes;
-		}
-	}
-
-	if (info) {
-		ret = link_free_space(block_group, info);
-		if (ret)
-			kfree(info);
-		goto out;
+		info->offset = left_info->offset;
+		info->bytes += left_info->bytes;
+		kfree(left_info);
 	}
 
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
-out:
+
+	spin_unlock(&block_group->tree_lock);
+
 	if (ret) {
 		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
 		if (ret == -EEXIST)
@@ -260,17 +251,16 @@ out:
 	return ret;
 }
 
-static int
-__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			  u64 offset, u64 bytes)
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
 	int ret = 0;
 
-	BUG_ON(!block_group->cached);
+	spin_lock(&block_group->tree_lock);
+
 	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
				  1);
-
 	if (info && info->offset == offset) {
 		if (info->bytes < bytes) {
 			printk(KERN_ERR "Found free space at %llu, size %llu,"
@@ -280,12 +270,14 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			       (unsigned long long)bytes);
 			WARN_ON(1);
 			ret = -EINVAL;
+			spin_unlock(&block_group->tree_lock);
 			goto out;
 		}
 		unlink_free_space(block_group, info);
 
 		if (info->bytes == bytes) {
 			kfree(info);
+			spin_unlock(&block_group->tree_lock);
 			goto out;
 		}
 
@@ -293,6 +285,7 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		info->bytes -= bytes;
 
 		ret = link_free_space(block_group, info);
+		spin_unlock(&block_group->tree_lock);
 		BUG_ON(ret);
 	} else if (info && info->offset < offset &&
		   info->offset + info->bytes >= offset + bytes) {
@@ -318,14 +311,15 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		 */
 		kfree(info);
 	}
-
+		spin_unlock(&block_group->tree_lock);
 		/* step two, insert a new info struct to cover anything
 		 * before the hole
 		 */
-		ret = __btrfs_add_free_space(block_group, old_start,
-					     offset - old_start);
+		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
 		BUG_ON(ret);
 	} else {
+		spin_unlock(&block_group->tree_lock);
 		if (!info) {
 			printk(KERN_ERR "couldn't find space %llu to free\n",
			       (unsigned long long)offset);
@@ -344,50 +338,6 @@ out:
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
-{
-	int ret;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = __btrfs_add_free_space(block_group, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-
-int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
-			      u64 offset, u64 bytes)
-{
-	int ret;
-
-	ret = __btrfs_add_free_space(block_group, offset, bytes);
-
-	return ret;
-}
-
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			    u64 offset, u64 bytes)
-{
-	int ret = 0;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = __btrfs_remove_free_space(block_group, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-
-int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
-				 u64 offset, u64 bytes)
-{
-	int ret;
-
-	ret = __btrfs_remove_free_space(block_group, offset, bytes);
-
-	return ret;
-}
-
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
 {
@@ -426,63 +376,44 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 	struct btrfs_free_space *info;
 	struct rb_node *node;
 
-	mutex_lock(&block_group->alloc_mutex);
+	spin_lock(&block_group->tree_lock);
 	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
 		info = rb_entry(node, struct btrfs_free_space, bytes_index);
 		unlink_free_space(block_group, info);
 		kfree(info);
 		if (need_resched()) {
-			mutex_unlock(&block_group->alloc_mutex);
+			spin_unlock(&block_group->tree_lock);
 			cond_resched();
-			mutex_lock(&block_group->alloc_mutex);
+			spin_lock(&block_group->tree_lock);
 		}
 	}
-	mutex_unlock(&block_group->alloc_mutex);
-}
-
-#if 0
-static struct btrfs_free_space *btrfs_find_free_space_offset(struct
-							      btrfs_block_group_cache
-							      *block_group, u64 offset,
-							      u64 bytes)
-{
-	struct btrfs_free_space *ret;
-
-	mutex_lock(&block_group->alloc_mutex);
-	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 0);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
+	spin_unlock(&block_group->tree_lock);
 }
 
-static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
-							     btrfs_block_group_cache
-							     *block_group, u64 offset,
-							     u64 bytes)
-{
-	struct btrfs_free_space *ret;
-
-	mutex_lock(&block_group->alloc_mutex);
-
-	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-	mutex_unlock(&block_group->alloc_mutex);
-
-	return ret;
-}
-#endif
-
-struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
-					       *block_group, u64 offset,
-					       u64 bytes)
-{
-	struct btrfs_free_space *ret = NULL;
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+			       u64 offset, u64 bytes, u64 empty_size)
+{
+	struct btrfs_free_space *entry = NULL;
+	u64 ret = 0;
 
-	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 1);
-	if (!ret)
-		ret = tree_search_bytes(&block_group->free_space_bytes,
-					offset, bytes);
+	spin_lock(&block_group->tree_lock);
+	entry = tree_search_offset(&block_group->free_space_offset, offset,
				   bytes + empty_size, 1);
+	if (!entry)
+		entry = tree_search_bytes(&block_group->free_space_bytes,
					  offset, bytes + empty_size);
+	if (entry) {
+		unlink_free_space(block_group, entry);
+		ret = entry->offset;
+		entry->offset += bytes;
+		entry->bytes -= bytes;
+
+		if (!entry->bytes)
+			kfree(entry);
+		else
+			link_free_space(block_group, entry);
+	}
+	spin_unlock(&block_group->tree_lock);
 
 	return ret;
 }
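
The reworked btrfs_add_free_space() above allocates its entry with kzalloc(GFP_NOFS) before taking tree_lock and only merges and links it while the spinlock is held, since the allocation may sleep but a spinlock holder must not. The following is a minimal, self-contained userspace sketch of that allocate-outside-the-lock, merge-under-the-lock pattern; it uses a sorted singly linked list and a pthread spinlock as stand-ins for the btrfs rbtrees and kernel spinlock, and the names (free_range, add_free_range, tree_lock) are illustrative, not btrfs APIs.

```c
/* Sketch only: coalescing free-space insertion with preallocation
 * outside the lock, modelled on the patch above. Not btrfs code. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct free_range {
	uint64_t offset;
	uint64_t bytes;
	struct free_range *next;	/* singly linked, sorted by offset */
};

static struct free_range *head;
static pthread_spinlock_t tree_lock;

/* Insert [offset, offset + bytes) and merge with touching neighbours. */
static int add_free_range(uint64_t offset, uint64_t bytes)
{
	struct free_range *info, **pp, *cur;

	/* allocate before locking, as the patch does with kzalloc(GFP_NOFS) */
	info = calloc(1, sizeof(*info));
	if (!info)
		return -1;
	info->offset = offset;
	info->bytes = bytes;

	pthread_spin_lock(&tree_lock);

	/* find the first entry that ends at or after the new range */
	for (pp = &head; (cur = *pp) && cur->offset + cur->bytes < offset;
	     pp = &cur->next)
		;

	/* merge with a left neighbour ending exactly at offset */
	if (cur && cur->offset + cur->bytes == offset) {
		info->offset = cur->offset;
		info->bytes += cur->bytes;
		*pp = cur->next;
		free(cur);
		cur = *pp;
	}

	/* merge with a right neighbour starting exactly at our end */
	if (cur && cur->offset == offset + bytes) {
		info->bytes += cur->bytes;
		*pp = cur->next;
		free(cur);
	}

	/* link the (possibly merged) entry back into the sorted list */
	info->next = *pp;
	*pp = info;

	pthread_spin_unlock(&tree_lock);
	return 0;
}

int main(void)
{
	pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);
	add_free_range(0, 4096);
	add_free_range(8192, 4096);
	add_free_range(4096, 4096);	/* bridges the two entries above */

	for (struct free_range *r = head; r; r = r->next)
		printf("free: offset=%llu bytes=%llu\n",
		       (unsigned long long)r->offset,
		       (unsigned long long)r->bytes);
	pthread_spin_destroy(&tree_lock);
	return 0;
}
```

With the three inserts above, the sketch prints a single coalesced range (offset=0, bytes=12288), mirroring how the patched function folds right_info and left_info into the preallocated entry before calling link_free_space().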