Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 517 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 390 insertions(+), 127 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a5aca3997d42..dc84daee6bc4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -21,6 +21,7 @@
 #include <linux/blkdev.h>
 #include <linux/sort.h>
 #include <linux/rcupdate.h>
+#include <linux/kthread.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -61,6 +62,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *extent_root, u64 alloc_bytes,
 				 u64 flags, int force);
 
+static noinline int
+block_group_cache_done(struct btrfs_block_group_cache *cache)
+{
+	smp_mb();
+	return cache->cached == BTRFS_CACHE_FINISHED;
+}
+
 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
 {
 	return (cache->flags & bits) == bits;
@@ -146,20 +154,70 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 }
 
 /*
+ * We always set EXTENT_LOCKED for the super mirror extents so we don't
+ * overwrite them, so those bits need to be unset.  Also, if we are unmounting
+ * with pinned extents still sitting there because a block group was still
+ * caching, we need to clear those now, since we are done.
+ */
+void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
+{
+	u64 start, end, last = 0;
+	int ret;
+
+	while (1) {
+		ret = find_first_extent_bit(&info->pinned_extents, last,
+					    &start, &end,
+					    EXTENT_LOCKED|EXTENT_DIRTY);
+		if (ret)
+			break;
+
+		clear_extent_bits(&info->pinned_extents, start, end,
+				  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
+		last = end + 1;
+	}
+}
+
+static int remove_sb_from_cache(struct btrfs_root *root,
+				struct btrfs_block_group_cache *cache)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	u64 bytenr;
+	u64 *logical;
+	int stripe_len;
+	int i, nr, ret;
+
+	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+		bytenr = btrfs_sb_offset(i);
+		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+				       cache->key.objectid, bytenr,
+				       0, &logical, &nr, &stripe_len);
+		BUG_ON(ret);
+		while (nr--) {
+			try_lock_extent(&fs_info->pinned_extents,
+					logical[nr],
+					logical[nr] + stripe_len - 1, GFP_NOFS);
+		}
+		kfree(logical);
+	}
+
+	return 0;
+}
+
+/*
  * this is only called by cache_block_group, since we could have freed extents
  * we need to check the pinned_extents for any extents that can't be used yet
  * since their free space will be released as soon as the transaction commits.
  */
-static int add_new_free_space(struct btrfs_block_group_cache *block_group,
+static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_fs_info *info, u64 start, u64 end)
 {
-	u64 extent_start, extent_end, size;
+	u64 extent_start, extent_end, size, total_added = 0;
 	int ret;
 
 	while (start < end) {
 		ret = find_first_extent_bit(&info->pinned_extents, start,
 					    &extent_start, &extent_end,
-					    EXTENT_DIRTY);
+					    EXTENT_DIRTY|EXTENT_LOCKED);
 		if (ret)
 			break;
 
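A note for readers following along: the unmount cleanup added above boils down to one idiom, repeatedly find the first extent range at or past a cursor that carries the interesting bits, clear those bits, and move the cursor one byte past the range. Below is a minimal userspace sketch of that scan loop; it uses a plain array in place of btrfs's extent_io tree, and every name in it is hypothetical rather than btrfs API:

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical stand-in for an extent-state record */
	struct range { uint64_t start, end; unsigned bits; };

	#define BIT_DIRTY  0x1
	#define BIT_LOCKED 0x2

	/* first range ending at or after 'from' carrying any bit in 'mask' */
	static struct range *find_first_range(struct range *r, int n,
					      uint64_t from, unsigned mask)
	{
		for (int i = 0; i < n; i++)
			if (r[i].end >= from && (r[i].bits & mask))
				return &r[i];
		return NULL;
	}

	int main(void)
	{
		struct range tree[] = {
			{ 0,     4095,  BIT_LOCKED },	/* super mirror */
			{ 8192,  12287, BIT_DIRTY },	/* pinned data  */
			{ 16384, 20479, BIT_DIRTY | BIT_LOCKED },
		};
		int n = sizeof(tree) / sizeof(tree[0]);
		uint64_t last = 0;
		struct range *hit;

		/* mirrors btrfs_free_pinned_extents: clear, then advance */
		while ((hit = find_first_range(tree, n, last,
					       BIT_DIRTY | BIT_LOCKED))) {
			printf("clearing [%llu, %llu]\n",
			       (unsigned long long)hit->start,
			       (unsigned long long)hit->end);
			hit->bits &= ~(BIT_DIRTY | BIT_LOCKED);
			last = hit->end + 1;
		}
		return 0;
	}

The last = end + 1 step is what guarantees forward progress on every iteration of the scan.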
@@ -167,6 +225,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
 			start = extent_end + 1;
 		} else if (extent_start > start && extent_start < end) {
 			size = extent_start - start;
+			total_added += size;
 			ret = btrfs_add_free_space(block_group, start,
 						   size);
 			BUG_ON(ret);
@@ -178,84 +237,80 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
 
 	if (start < end) {
 		size = end - start;
+		total_added += size;
 		ret = btrfs_add_free_space(block_group, start, size);
 		BUG_ON(ret);
 	}
 
-	return 0;
+	return total_added;
 }
 
-static int remove_sb_from_cache(struct btrfs_root *root,
-				struct btrfs_block_group_cache *cache)
-{
-	u64 bytenr;
-	u64 *logical;
-	int stripe_len;
-	int i, nr, ret;
-
-	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
-		bytenr = btrfs_sb_offset(i);
-		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
-				       cache->key.objectid, bytenr, 0,
-				       &logical, &nr, &stripe_len);
-		BUG_ON(ret);
-		while (nr--) {
-			btrfs_remove_free_space(cache, logical[nr],
-						stripe_len);
-		}
-		kfree(logical);
-	}
-	return 0;
-}
-
-static int cache_block_group(struct btrfs_root *root,
-			     struct btrfs_block_group_cache *block_group)
+static int caching_kthread(void *data)
 {
+	struct btrfs_block_group_cache *block_group = data;
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
+	u64 last = 0;
 	struct btrfs_path *path;
 	int ret = 0;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	int slot;
-	u64 last;
-
-	if (!block_group)
-		return 0;
+	u64 total_found = 0;
 
-	root = root->fs_info->extent_root;
-
-	if (block_group->cached)
-		return 0;
+	BUG_ON(!fs_info);
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 2;
+	atomic_inc(&block_group->space_info->caching_threads);
+	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+again:
+	/* need to make sure the commit_root doesn't disappear */
+	down_read(&fs_info->extent_commit_sem);
+
 	/*
-	 * we get into deadlocks with paths held by callers of this function.
-	 * since the alloc_mutex is protecting things right now, just
-	 * skip the locking here
+	 * We don't want to deadlock with somebody trying to allocate a new
+	 * extent for the extent root while also trying to search the extent
+	 * root to add free space.  So we skip locking and search the commit
+	 * root, since it's read-only
 	 */
 	path->skip_locking = 1;
-	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+	path->search_commit_root = 1;
+	path->reada = 2;
+
 	key.objectid = last;
 	key.offset = 0;
 	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
-	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto err;
 
 	while (1) {
+		smp_mb();
+		if (block_group->fs_info->closing > 1) {
+			last = (u64)-1;
+			break;
+		}
+
 		leaf = path->nodes[0];
 		slot = path->slots[0];
 		if (slot >= btrfs_header_nritems(leaf)) {
-			ret = btrfs_next_leaf(root, path);
+			ret = btrfs_next_leaf(fs_info->extent_root, path);
 			if (ret < 0)
 				goto err;
-			if (ret == 0)
-				continue;
-			else
+			else if (ret)
 				break;
+
+			if (need_resched() ||
+			    btrfs_transaction_in_commit(fs_info)) {
+				btrfs_release_path(fs_info->extent_root, path);
+				up_read(&fs_info->extent_commit_sem);
+				schedule_timeout(1);
+				goto again;
+			}
+
+			continue;
 		}
 		btrfs_item_key_to_cpu(leaf, &key, slot);
 		if (key.objectid < block_group->key.objectid)
@@ -266,24 +321,59 @@ static int cache_block_group(struct btrfs_root *root,
 			break;
 
 		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
-			add_new_free_space(block_group, root->fs_info, last,
-					   key.objectid);
-
+			total_found += add_new_free_space(block_group,
+							  fs_info, last,
+							  key.objectid);
 			last = key.objectid + key.offset;
 		}
+
+		if (total_found > (1024 * 1024 * 2)) {
+			total_found = 0;
+			wake_up(&block_group->caching_q);
+		}
 next:
 		path->slots[0]++;
 	}
+	ret = 0;
 
-	add_new_free_space(block_group, root->fs_info, last,
+	total_found += add_new_free_space(block_group, fs_info, last,
 			   block_group->key.objectid +
 			   block_group->key.offset);
+
+	spin_lock(&block_group->lock);
+	block_group->cached = BTRFS_CACHE_FINISHED;
+	spin_unlock(&block_group->lock);
 
-	block_group->cached = 1;
-	remove_sb_from_cache(root, block_group);
-	ret = 0;
 err:
 	btrfs_free_path(path);
+	up_read(&fs_info->extent_commit_sem);
+	atomic_dec(&block_group->space_info->caching_threads);
+	wake_up(&block_group->caching_q);
+
+	return 0;
+}
+
+static int cache_block_group(struct btrfs_block_group_cache *cache)
+{
+	struct task_struct *tsk;
+	int ret = 0;
+
+	spin_lock(&cache->lock);
+	if (cache->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&cache->lock);
+		return ret;
+	}
+	cache->cached = BTRFS_CACHE_STARTED;
+	spin_unlock(&cache->lock);
+
+	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
+			  cache->key.objectid);
+	if (IS_ERR(tsk)) {
+		ret = PTR_ERR(tsk);
+		printk(KERN_ERR "error running thread %d\n", ret);
+		BUG();
+	}
+
 	return ret;
 }
 
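Taken together, caching_kthread() and cache_block_group() form a start-once state machine: the starter flips cached from BTRFS_CACHE_NO to BTRFS_CACHE_STARTED under the spinlock and launches a kthread, and the worker publishes BTRFS_CACHE_FINISHED and wakes caching_q when its scan is done. Here is a compilable userspace analogue of that handoff, a sketch only: it substitutes pthreads, a mutex, and a condition variable for kthread_run(), spinlocks, and wait queues, and all names are invented for illustration:

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED };

	struct group {
		pthread_mutex_t lock;
		pthread_cond_t  caching_q;	/* stands in for the wait queue */
		enum cache_state cached;
	};

	static void *caching_worker(void *data)
	{
		struct group *g = data;

		sleep(1);			/* stands in for the extent-tree scan */

		pthread_mutex_lock(&g->lock);
		g->cached = CACHE_FINISHED;	/* publish completion under the lock */
		pthread_mutex_unlock(&g->lock);
		pthread_cond_broadcast(&g->caching_q);
		return NULL;
	}

	/* idempotent starter, shaped like cache_block_group() */
	static void start_caching(struct group *g, pthread_t *tsk)
	{
		pthread_mutex_lock(&g->lock);
		if (g->cached != CACHE_NO) {	/* already started or done */
			pthread_mutex_unlock(&g->lock);
			return;
		}
		g->cached = CACHE_STARTED;
		pthread_mutex_unlock(&g->lock);
		pthread_create(tsk, NULL, caching_worker, g);
	}

	int main(void)
	{
		struct group g = { PTHREAD_MUTEX_INITIALIZER,
				   PTHREAD_COND_INITIALIZER, CACHE_NO };
		pthread_t tsk;

		start_caching(&g, &tsk);

		/* like wait_event(caching_q, block_group_cache_done()) */
		pthread_mutex_lock(&g.lock);
		while (g.cached != CACHE_FINISHED)
			pthread_cond_wait(&g.caching_q, &g.lock);
		pthread_mutex_unlock(&g.lock);

		printf("cache finished\n");
		pthread_join(tsk, NULL);
		return 0;
	}

The state transition happening under the lock is what makes the starter safe to call from any number of racing allocators: only one of them ever sees CACHE_NO.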
@@ -2387,13 +2477,29 @@ fail:
 
 }
 
+static struct btrfs_block_group_cache *
+next_block_group(struct btrfs_root *root,
+		 struct btrfs_block_group_cache *cache)
+{
+	struct rb_node *node;
+	spin_lock(&root->fs_info->block_group_cache_lock);
+	node = rb_next(&cache->cache_node);
+	btrfs_put_block_group(cache);
+	if (node) {
+		cache = rb_entry(node, struct btrfs_block_group_cache,
+				 cache_node);
+		atomic_inc(&cache->count);
+	} else
+		cache = NULL;
+	spin_unlock(&root->fs_info->block_group_cache_lock);
+	return cache;
+}
+
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
-	struct btrfs_block_group_cache *cache, *entry;
-	struct rb_node *n;
+	struct btrfs_block_group_cache *cache;
 	int err = 0;
-	int werr = 0;
 	struct btrfs_path *path;
 	u64 last = 0;
 
@@ -2402,39 +2508,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	while (1) {
-		cache = NULL;
-		spin_lock(&root->fs_info->block_group_cache_lock);
-		for (n = rb_first(&root->fs_info->block_group_cache_tree);
-		     n; n = rb_next(n)) {
-			entry = rb_entry(n, struct btrfs_block_group_cache,
-					 cache_node);
-			if (entry->dirty) {
-				cache = entry;
-				break;
-			}
+		if (last == 0) {
+			err = btrfs_run_delayed_refs(trans, root,
+						     (unsigned long)-1);
+			BUG_ON(err);
 		}
-		spin_unlock(&root->fs_info->block_group_cache_lock);
 
-		if (!cache)
-			break;
+		cache = btrfs_lookup_first_block_group(root->fs_info, last);
+		while (cache) {
+			if (cache->dirty)
+				break;
+			cache = next_block_group(root, cache);
+		}
+		if (!cache) {
+			if (last == 0)
+				break;
+			last = 0;
+			continue;
+		}
 
 		cache->dirty = 0;
-		last += cache->key.offset;
+		last = cache->key.objectid + cache->key.offset;
 
-		err = write_one_cache_group(trans, root,
-					    path, cache);
-		/*
-		 * if we fail to write the cache group, we want
-		 * to keep it marked dirty in hopes that a later
-		 * write will work
-		 */
-		if (err) {
-			werr = err;
-			continue;
-		}
+		err = write_one_cache_group(trans, root, path, cache);
+		BUG_ON(err);
+		btrfs_put_block_group(cache);
 	}
+
 	btrfs_free_path(path);
-	return werr;
+	return 0;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
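The rewritten writeback loop above trades the under-lock rbtree rescan for a cursor walk: resume at last, advance with next_block_group() until something dirty turns up, and when the walk falls off the end of the tree, reset last to 0 and take one more pass (groups before the cursor may have been re-dirtied in the meantime). Only a clean pass that started from 0 terminates the loop. A sketch of just that control flow, over a plain array instead of the block group rbtree (hypothetical types, not btrfs code):

	#include <stdbool.h>
	#include <stdio.h>

	struct group { unsigned long long objectid, offset; bool dirty; };

	int main(void)
	{
		struct group groups[] = {
			{ 0,   100, false }, { 100, 100, true },
			{ 200, 100, false }, { 300, 100, true },
		};
		const int n = 4;
		unsigned long long last = 0;

		while (1) {
			struct group *cache = NULL;

			/* find the first dirty group at or after 'last' */
			for (int i = 0; i < n; i++) {
				if (groups[i].objectid >= last && groups[i].dirty) {
					cache = &groups[i];
					break;
				}
			}
			if (!cache) {
				if (last == 0)	/* clean pass from 0: done */
					break;
				last = 0;	/* wrap around and rescan */
				continue;
			}

			cache->dirty = false;
			last = cache->objectid + cache->offset;
			printf("wrote group at %llu\n", cache->objectid);
		}
		return 0;
	}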
@@ -2484,6 +2586,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->force_alloc = 0;
 	*space_info = found;
 	list_add_rcu(&found->list, &info->space_info);
+	atomic_set(&found->caching_threads, 0);
 	return 0;
 }
 
@@ -2947,13 +3050,9 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
-	if (pin) {
+	if (pin)
 		set_extent_dirty(&fs_info->pinned_extents,
 				 bytenr, bytenr + num - 1, GFP_NOFS);
-	} else {
-		clear_extent_dirty(&fs_info->pinned_extents,
-				   bytenr, bytenr + num - 1, GFP_NOFS);
-	}
 
 	while (num > 0) {
 		cache = btrfs_lookup_block_group(fs_info, bytenr);
@@ -2969,14 +3068,34 @@ int btrfs_update_pinned_extents(struct btrfs_root *root,
 			spin_unlock(&cache->space_info->lock);
 			fs_info->total_pinned += len;
 		} else {
+			int unpin = 0;
+
+			/*
+			 * in order to not race with the block group caching, we
+			 * only want to unpin the extent if we are cached.  If
+			 * we aren't cached, we want to start async caching this
+			 * block group so we can free the extent the next time
+			 * around.
+			 */
 			spin_lock(&cache->space_info->lock);
 			spin_lock(&cache->lock);
-			cache->pinned -= len;
-			cache->space_info->bytes_pinned -= len;
+			unpin = (cache->cached == BTRFS_CACHE_FINISHED);
+			if (likely(unpin)) {
+				cache->pinned -= len;
+				cache->space_info->bytes_pinned -= len;
+				fs_info->total_pinned -= len;
+			}
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
-			fs_info->total_pinned -= len;
-			if (cache->cached)
+
+			if (likely(unpin))
+				clear_extent_dirty(&fs_info->pinned_extents,
+						   bytenr, bytenr + len - 1,
+						   GFP_NOFS);
+			else
+				cache_block_group(cache);
+
+			if (unpin)
 				btrfs_add_free_space(cache, bytenr, len);
 		}
 		btrfs_put_block_group(cache);
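The unpin branch above is a decide-inside, act-outside pattern: whether the group is fully cached is sampled once under both locks and stored in unpin, the counters move under those same locks, and the heavier extent-tree calls (clear_extent_dirty() or kicking off caching) happen only after the locks are dropped. In miniature, with a pthread mutex standing in for the spinlocks (hypothetical names, sketch only):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct group {
		pthread_mutex_t lock;
		bool cache_done;
		long pinned;
	};

	static void unpin_range(struct group *g, long len)
	{
		bool unpin;

		pthread_mutex_lock(&g->lock);
		unpin = g->cache_done;	/* decide while the state is stable */
		if (unpin)
			g->pinned -= len;	/* counters move under the lock */
		pthread_mutex_unlock(&g->lock);

		/* expensive work happens with the lock dropped */
		if (unpin)
			printf("returned %ld bytes to free space\n", len);
		else
			printf("not cached yet, kick off caching instead\n");
	}

	int main(void)	/* single-threaded demo of the call sequence */
	{
		struct group g = { PTHREAD_MUTEX_INITIALIZER, false, 4096 };

		unpin_range(&g, 4096);	/* caching not done: extent stays pinned */
		g.cache_done = true;
		unpin_range(&g, 4096);	/* now the space can be reused */
		return 0;
	}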
@@ -3030,6 +3149,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
 					    &start, &end, EXTENT_DIRTY);
 		if (ret)
 			break;
+
 		set_extent_dirty(copy, start, end, GFP_NOFS);
 		last = end + 1;
 	}
@@ -3058,6 +3178,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
 		cond_resched();
 	}
+
 	return ret;
 }
 
@@ -3436,6 +3557,45 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
 }
 
 /*
+ * when we wait for progress in the block group caching, it's because
+ * our allocation attempt failed at least once.  So, we must sleep
+ * and let some progress happen before we try again.
+ *
+ * This function will sleep at least once waiting for new free space to
+ * show up, and then it will check the block group free space numbers
+ * for our min num_bytes.  Another option is to have it go ahead
+ * and look in the rbtree for a free extent of a given size, but this
+ * is a good start.
+ */
+static noinline int
+wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
+				u64 num_bytes)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
+
+	if (block_group_cache_done(cache)) {
+		finish_wait(&cache->caching_q, &wait);
+		return 0;
+	}
+	schedule();
+	finish_wait(&cache->caching_q, &wait);
+
+	wait_event(cache->caching_q, block_group_cache_done(cache) ||
+		   (cache->free_space >= num_bytes));
+	return 0;
+}
+
+enum btrfs_loop_type {
+	LOOP_CACHED_ONLY = 0,
+	LOOP_CACHING_NOWAIT = 1,
+	LOOP_CACHING_WAIT = 2,
+	LOOP_ALLOC_CHUNK = 3,
+	LOOP_NO_EMPTY_SIZE = 4,
+};
+
+/*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
  * ins->objectid == block start
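The btrfs_loop_type enum introduced above replaces the old magic loop values 0-2 with named allocator passes that escalate from cheapest (only fully cached block groups) to most expensive (retry with empty_size and empty_cluster zeroed). The driver below is a toy illustration of that escalation order, with a stub predicate in place of the real allocation attempt; none of it is btrfs code:

	#include <stdbool.h>
	#include <stdio.h>

	enum loop_type {	/* mirrors btrfs_loop_type */
		LOOP_CACHED_ONLY = 0,
		LOOP_CACHING_NOWAIT,
		LOOP_CACHING_WAIT,
		LOOP_ALLOC_CHUNK,
		LOOP_NO_EMPTY_SIZE,
	};

	/* stub: pretend only the most expensive pass can succeed */
	static bool try_alloc(enum loop_type loop)
	{
		return loop == LOOP_NO_EMPTY_SIZE;
	}

	int main(void)
	{
		static const char *names[] = {
			"cached-only", "caching-nowait", "caching-wait",
			"alloc-chunk", "no-empty-size",
		};

		/* each failed pass falls through to a more expensive one */
		for (enum loop_type loop = LOOP_CACHED_ONLY;
		     loop <= LOOP_NO_EMPTY_SIZE; loop++) {
			printf("pass %d (%s)\n", loop, names[loop]);
			if (try_alloc(loop)) {
				printf("allocation succeeded\n");
				break;
			}
		}
		return 0;
	}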
@@ -3460,6 +3620,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_space_info *space_info;
 	int last_ptr_loop = 0;
 	int loop = 0;
+	bool found_uncached_bg = false;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -3491,15 +3652,18 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	search_start = max(search_start, first_logical_byte(root, 0));
 	search_start = max(search_start, hint_byte);
 
-	if (!last_ptr) {
+	if (!last_ptr)
 		empty_cluster = 0;
-		loop = 1;
-	}
 
 	if (search_start == hint_byte) {
 		block_group = btrfs_lookup_block_group(root->fs_info,
 						       search_start);
-		if (block_group && block_group_bits(block_group, data)) {
+		/*
+		 * we don't want to use the block group if it doesn't match our
+		 * allocation bits, or if it's not cached.
+		 */
+		if (block_group && block_group_bits(block_group, data) &&
+		    block_group_cache_done(block_group)) {
 			down_read(&space_info->groups_sem);
 			if (list_empty(&block_group->list) ||
 			    block_group->ro) {
@@ -3522,21 +3686,35 @@ search:
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups, list) {
 		u64 offset;
+		int cached;
 
 		atomic_inc(&block_group->count);
 		search_start = block_group->key.objectid;
 
 have_block_group:
-		if (unlikely(!block_group->cached)) {
-			mutex_lock(&block_group->cache_mutex);
-			ret = cache_block_group(root, block_group);
-			mutex_unlock(&block_group->cache_mutex);
-			if (ret) {
-				btrfs_put_block_group(block_group);
-				break;
+		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+			/*
+			 * we want to start caching kthreads, but not too many
+			 * right off the bat so we don't overwhelm the system,
+			 * so only start them if there are less than 2 and we're
+			 * in the initial allocation phase.
+			 */
+			if (loop > LOOP_CACHING_NOWAIT ||
+			    atomic_read(&space_info->caching_threads) < 2) {
+				ret = cache_block_group(block_group);
+				BUG_ON(ret);
 			}
 		}
 
+		cached = block_group_cache_done(block_group);
+		if (unlikely(!cached)) {
+			found_uncached_bg = true;
+
+			/* if we only want cached bgs, loop */
+			if (loop == LOOP_CACHED_ONLY)
+				goto loop;
+		}
+
 		if (unlikely(block_group->ro))
 			goto loop;
 
@@ -3615,14 +3793,21 @@ refill_cluster:
 				spin_unlock(&last_ptr->refill_lock);
 				goto checks;
 			}
+		} else if (!cached && loop > LOOP_CACHING_NOWAIT) {
+			spin_unlock(&last_ptr->refill_lock);
+
+			wait_block_group_cache_progress(block_group,
+				num_bytes + empty_cluster + empty_size);
+			goto have_block_group;
 		}
+
 		/*
 		 * at this point we either didn't find a cluster
 		 * or we weren't able to allocate a block from our
 		 * cluster.  Free the cluster we've been trying
 		 * to use, and go to the next block group
 		 */
-		if (loop < 2) {
+		if (loop < LOOP_NO_EMPTY_SIZE) {
 			btrfs_return_cluster_to_free_space(NULL,
 							   last_ptr);
 			spin_unlock(&last_ptr->refill_lock);
@@ -3633,11 +3818,17 @@ refill_cluster:
 
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size);
-		if (!offset)
+		if (!offset && (cached || (!cached &&
+					   loop == LOOP_CACHING_NOWAIT))) {
 			goto loop;
+		} else if (!offset && (!cached &&
+				       loop > LOOP_CACHING_NOWAIT)) {
+			wait_block_group_cache_progress(block_group,
+				num_bytes + empty_size);
+			goto have_block_group;
+		}
 checks:
 		search_start = stripe_align(root, offset);
-
 		/* move on to the next group */
 		if (search_start + num_bytes >= search_end) {
 			btrfs_add_free_space(block_group, offset, num_bytes);
@@ -3683,13 +3874,26 @@ loop:
 	}
 	up_read(&space_info->groups_sem);
 
-	/* loop == 0, try to find a clustered alloc in every block group
-	 * loop == 1, try again after forcing a chunk allocation
-	 * loop == 2, set empty_size and empty_cluster to 0 and try again
+	/* LOOP_CACHED_ONLY, only search fully cached block groups
+	 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
+	 *			don't wait for them to finish caching
+	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
+	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
+	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
+	 *		       again
 	 */
-	if (!ins->objectid && loop < 3 &&
-	    (empty_size || empty_cluster || allowed_chunk_alloc)) {
-		if (loop >= 2) {
+	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
+	    (found_uncached_bg || empty_size || empty_cluster ||
+	     allowed_chunk_alloc)) {
+		if (found_uncached_bg) {
+			found_uncached_bg = false;
+			if (loop < LOOP_CACHING_WAIT) {
+				loop++;
+				goto search;
+			}
+		}
+
+		if (loop == LOOP_ALLOC_CHUNK) {
 			empty_size = 0;
 			empty_cluster = 0;
 		}
@@ -3702,7 +3906,7 @@ loop:
 			space_info->force_alloc = 1;
 		}
 
-		if (loop < 3) {
+		if (loop < LOOP_NO_EMPTY_SIZE) {
 			loop++;
 			goto search;
 		}
@@ -3798,7 +4002,7 @@ again:
 					 num_bytes, data, 1);
 		goto again;
 	}
-	if (ret) {
+	if (ret == -ENOSPC) {
 		struct btrfs_space_info *sinfo;
 
 		sinfo = __find_space_info(root->fs_info, data);
@@ -3806,7 +4010,6 @@ again:
3806 "wanted %llu\n", (unsigned long long)data, 4010 "wanted %llu\n", (unsigned long long)data,
3807 (unsigned long long)num_bytes); 4011 (unsigned long long)num_bytes);
3808 dump_space_info(sinfo, num_bytes); 4012 dump_space_info(sinfo, num_bytes);
3809 BUG();
3810 } 4013 }
3811 4014
3812 return ret; 4015 return ret;
@@ -3844,7 +4047,9 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
 				     empty_size, hint_byte, search_end, ins,
 				     data);
-	update_reserved_extents(root, ins->objectid, ins->offset, 1);
+	if (!ret)
+		update_reserved_extents(root, ins->objectid, ins->offset, 1);
+
 	return ret;
 }
 
@@ -4006,9 +4211,9 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *block_group;
 
 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	mutex_lock(&block_group->cache_mutex);
-	cache_block_group(root, block_group);
-	mutex_unlock(&block_group->cache_mutex);
+	cache_block_group(block_group);
+	wait_event(block_group->caching_q,
+		   block_group_cache_done(block_group));
 
 	ret = btrfs_remove_free_space(block_group, ins->objectid,
 				      ins->offset);
@@ -4039,7 +4244,8 @@ static int alloc_tree_block(struct btrfs_trans_handle *trans,
 	ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
 				     empty_size, hint_byte, search_end,
 				     ins, 0);
-	BUG_ON(ret);
+	if (ret)
+		return ret;
 
 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
 		if (parent == 0)
@@ -6955,11 +7161,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 			 &info->block_group_cache_tree);
 		spin_unlock(&info->block_group_cache_lock);
 
-		btrfs_remove_free_space_cache(block_group);
 		down_write(&block_group->space_info->groups_sem);
 		list_del(&block_group->list);
 		up_write(&block_group->space_info->groups_sem);
 
+		if (block_group->cached == BTRFS_CACHE_STARTED)
+			wait_event(block_group->caching_q,
+				   block_group_cache_done(block_group));
+
+		btrfs_remove_free_space_cache(block_group);
+
 		WARN_ON(atomic_read(&block_group->count) != 1);
 		kfree(block_group);
 
@@ -7025,9 +7236,19 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		atomic_set(&cache->count, 1);
 		spin_lock_init(&cache->lock);
 		spin_lock_init(&cache->tree_lock);
-		mutex_init(&cache->cache_mutex);
+		cache->fs_info = info;
+		init_waitqueue_head(&cache->caching_q);
 		INIT_LIST_HEAD(&cache->list);
 		INIT_LIST_HEAD(&cache->cluster_list);
+
+		/*
+		 * we only want to have 32k of ram per block group for keeping
+		 * track of free space, and if we pass 1/2 of that we want to
+		 * start converting things over to using bitmaps
+		 */
+		cache->extents_thresh = ((1024 * 32) / 2) /
+					sizeof(struct btrfs_free_space);
+
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
@@ -7036,6 +7257,26 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		key.objectid = found_key.objectid + found_key.offset;
 		btrfs_release_path(root, path);
 		cache->flags = btrfs_block_group_flags(&cache->item);
+		cache->sectorsize = root->sectorsize;
+
+		remove_sb_from_cache(root, cache);
+
+		/*
+		 * check for two cases, either we are full, and therefore
+		 * don't need to bother with the caching work since we won't
+		 * find any space, or we are empty, and we can just add all
+		 * the space in and be done with it.  This saves us a lot of
+		 * time, particularly in the full case.
+		 */
+		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
+			cache->cached = BTRFS_CACHE_FINISHED;
+		} else if (btrfs_block_group_used(&cache->item) == 0) {
+			cache->cached = BTRFS_CACHE_FINISHED;
+			add_new_free_space(cache, root->fs_info,
+					   found_key.objectid,
+					   found_key.objectid +
+					   found_key.offset);
+		}
 
 		ret = update_space_info(info, cache->flags, found_key.offset,
 					btrfs_block_group_used(&cache->item),
@@ -7079,10 +7320,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->key.objectid = chunk_offset;
 	cache->key.offset = size;
 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+	cache->sectorsize = root->sectorsize;
+
+	/*
+	 * we only want to have 32k of ram per block group for keeping track
+	 * of free space, and if we pass 1/2 of that we want to start
+	 * converting things over to using bitmaps
+	 */
+	cache->extents_thresh = ((1024 * 32) / 2) /
+				sizeof(struct btrfs_free_space);
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
 	spin_lock_init(&cache->tree_lock);
-	mutex_init(&cache->cache_mutex);
+	init_waitqueue_head(&cache->caching_q);
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
 
@@ -7091,6 +7341,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->flags = type;
 	btrfs_set_block_group_flags(&cache->item, type);
 
+	cache->cached = BTRFS_CACHE_FINISHED;
+	remove_sb_from_cache(root, cache);
+
+	add_new_free_space(cache, root->fs_info, chunk_offset,
+			   chunk_offset + size);
+
 	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
 				&cache->space_info);
 	BUG_ON(ret);
@@ -7149,7 +7405,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	rb_erase(&block_group->cache_node,
 		 &root->fs_info->block_group_cache_tree);
 	spin_unlock(&root->fs_info->block_group_cache_lock);
-	btrfs_remove_free_space_cache(block_group);
+
 	down_write(&block_group->space_info->groups_sem);
 	/*
 	 * we must use list_del_init so people can check to see if they
@@ -7158,11 +7414,18 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	list_del_init(&block_group->list);
 	up_write(&block_group->space_info->groups_sem);
 
+	if (block_group->cached == BTRFS_CACHE_STARTED)
+		wait_event(block_group->caching_q,
+			   block_group_cache_done(block_group));
+
+	btrfs_remove_free_space_cache(block_group);
+
 	spin_lock(&block_group->space_info->lock);
 	block_group->space_info->total_bytes -= block_group->key.offset;
 	block_group->space_info->bytes_readonly -= block_group->key.offset;
 	spin_unlock(&block_group->space_info->lock);
-	block_group->space_info->full = 0;
+
+	btrfs_clear_space_info_full(root->fs_info);
 
 	btrfs_put_block_group(block_group);
 	btrfs_put_block_group(block_group);