Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	1058
1 file changed, 845 insertions, 213 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4538e48581a5..5edcee3a617f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -16,45 +16,46 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/pagemap.h>
 #include <linux/sched.h>
+#include <linux/math64.h>
 #include "ctree.h"
 #include "free-space-cache.h"
 #include "transaction.h"
 
-struct btrfs_free_space {
-	struct rb_node bytes_index;
-	struct rb_node offset_index;
-	u64 offset;
-	u64 bytes;
-};
+#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
 
-static int tree_insert_offset(struct rb_root *root, u64 offset,
-			      struct rb_node *node)
+static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+					  u64 offset)
 {
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct btrfs_free_space *info;
+	BUG_ON(offset < bitmap_start);
+	offset -= bitmap_start;
+	return (unsigned long)(div64_u64(offset, sectorsize));
+}
 
-	while (*p) {
-		parent = *p;
-		info = rb_entry(parent, struct btrfs_free_space, offset_index);
+static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+{
+	return (unsigned long)(div64_u64(bytes, sectorsize));
+}
 
-		if (offset < info->offset)
-			p = &(*p)->rb_left;
-		else if (offset > info->offset)
-			p = &(*p)->rb_right;
-		else
-			return -EEXIST;
-	}
+static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+				   u64 offset)
+{
+	u64 bitmap_start;
+	u64 bytes_per_bitmap;
 
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
+	bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
+	bitmap_start = offset - block_group->key.objectid;
+	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
+	bitmap_start *= bytes_per_bitmap;
+	bitmap_start += block_group->key.objectid;
 
-	return 0;
+	return bitmap_start;
 }
 
-static int tree_insert_bytes(struct rb_root *root, u64 bytes,
-			     struct rb_node *node)
+static int tree_insert_offset(struct rb_root *root, u64 offset,
+			      struct rb_node *node, int bitmap)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
@@ -62,12 +63,34 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
 
 	while (*p) {
 		parent = *p;
-		info = rb_entry(parent, struct btrfs_free_space, bytes_index);
+		info = rb_entry(parent, struct btrfs_free_space, offset_index);
 
-		if (bytes < info->bytes)
+		if (offset < info->offset) {
 			p = &(*p)->rb_left;
-		else
+		} else if (offset > info->offset) {
 			p = &(*p)->rb_right;
+		} else {
+			/*
+			 * we could have a bitmap entry and an extent entry
+			 * share the same offset. If this is the case, we want
+			 * the extent entry to always be found first if we do a
+			 * linear search through the tree, since we want to have
+			 * the quickest allocation time, and allocating from an
+			 * extent is faster than allocating from a bitmap. So
+			 * if we're inserting a bitmap and we find an entry at
+			 * this offset, we want to go right, or after this entry
+			 * logically. If we are inserting an extent and we've
+			 * found a bitmap, we want to go left, or before
+			 * logically.
+			 */
+			if (bitmap) {
+				WARN_ON(info->bitmap);
+				p = &(*p)->rb_right;
+			} else {
+				WARN_ON(!info->bitmap);
+				p = &(*p)->rb_left;
+			}
+		}
 	}
 
 	rb_link_node(node, parent, p);
@@ -79,110 +102,143 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
 /*
  * searches the tree for the given offset.
  *
- * fuzzy == 1: this is used for allocations where we are given a hint of where
- * to look for free space. Because the hint may not be completely on an offset
- * mark, or the hint may no longer point to free space we need to fudge our
- * results a bit. So we look for free space starting at or after offset with at
- * least bytes size. We prefer to find as close to the given offset as we can.
- * Also if the offset is within a free space range, then we will return the free
- * space that contains the given offset, which means we can return a free space
- * chunk with an offset before the provided offset.
- *
- * fuzzy == 0: this is just a normal tree search. Give us the free space that
- * starts at the given offset which is at least bytes size, and if its not there
- * return NULL.
+ * fuzzy - If this is set, then we are trying to make an allocation, and we just
+ * want a section that has at least bytes size and comes at or after the given
+ * offset.
  */
-static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
-						   u64 offset, u64 bytes,
-						   int fuzzy)
+static struct btrfs_free_space *
+tree_search_offset(struct btrfs_block_group_cache *block_group,
+		   u64 offset, int bitmap_only, int fuzzy)
 {
-	struct rb_node *n = root->rb_node;
-	struct btrfs_free_space *entry, *ret = NULL;
+	struct rb_node *n = block_group->free_space_offset.rb_node;
+	struct btrfs_free_space *entry, *prev = NULL;
+
+	/* find entry that is closest to the 'offset' */
+	while (1) {
+		if (!n) {
+			entry = NULL;
+			break;
+		}
 
-	while (n) {
 		entry = rb_entry(n, struct btrfs_free_space, offset_index);
+		prev = entry;
 
-		if (offset < entry->offset) {
-			if (fuzzy &&
-			    (!ret || entry->offset < ret->offset) &&
-			    (bytes <= entry->bytes))
-				ret = entry;
+		if (offset < entry->offset)
 			n = n->rb_left;
-		} else if (offset > entry->offset) {
-			if (fuzzy &&
-			    (entry->offset + entry->bytes - 1) >= offset &&
-			    bytes <= entry->bytes) {
-				ret = entry;
-				break;
-			}
+		else if (offset > entry->offset)
 			n = n->rb_right;
-		} else {
-			if (bytes > entry->bytes) {
-				n = n->rb_right;
-				continue;
-			}
-			ret = entry;
+		else
 			break;
-		}
 	}
 
-	return ret;
-}
+	if (bitmap_only) {
+		if (!entry)
+			return NULL;
+		if (entry->bitmap)
+			return entry;
 
-/*
- * return a chunk at least bytes size, as close to offset that we can get.
- */
-static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
-						  u64 offset, u64 bytes)
-{
-	struct rb_node *n = root->rb_node;
-	struct btrfs_free_space *entry, *ret = NULL;
-
-	while (n) {
-		entry = rb_entry(n, struct btrfs_free_space, bytes_index);
+		/*
+		 * bitmap entry and extent entry may share same offset,
+		 * in that case, bitmap entry comes after extent entry.
+		 */
+		n = rb_next(n);
+		if (!n)
+			return NULL;
+		entry = rb_entry(n, struct btrfs_free_space, offset_index);
+		if (entry->offset != offset)
+			return NULL;
 
-		if (bytes < entry->bytes) {
+		WARN_ON(!entry->bitmap);
+		return entry;
+	} else if (entry) {
+		if (entry->bitmap) {
 			/*
-			 * We prefer to get a hole size as close to the size we
-			 * are asking for so we don't take small slivers out of
-			 * huge holes, but we also want to get as close to the
-			 * offset as possible so we don't have a whole lot of
-			 * fragmentation.
+			 * if previous extent entry covers the offset,
+			 * we should return it instead of the bitmap entry
 			 */
-			if (offset <= entry->offset) {
-				if (!ret)
-					ret = entry;
-				else if (entry->bytes < ret->bytes)
-					ret = entry;
-				else if (entry->offset < ret->offset)
-					ret = entry;
+			n = &entry->offset_index;
+			while (1) {
+				n = rb_prev(n);
+				if (!n)
+					break;
+				prev = rb_entry(n, struct btrfs_free_space,
+						offset_index);
+				if (!prev->bitmap) {
+					if (prev->offset + prev->bytes > offset)
+						entry = prev;
+					break;
+				}
 			}
-			n = n->rb_left;
-		} else if (bytes > entry->bytes) {
-			n = n->rb_right;
+		}
+		return entry;
+	}
+
+	if (!prev)
+		return NULL;
+
+	/* find last entry before the 'offset' */
+	entry = prev;
+	if (entry->offset > offset) {
+		n = rb_prev(&entry->offset_index);
+		if (n) {
+			entry = rb_entry(n, struct btrfs_free_space,
+					 offset_index);
+			BUG_ON(entry->offset > offset);
 		} else {
-			/*
-			 * Ok we may have multiple chunks of the wanted size,
-			 * so we don't want to take the first one we find, we
-			 * want to take the one closest to our given offset, so
-			 * keep searching just in case theres a better match.
-			 */
-			n = n->rb_right;
-			if (offset > entry->offset)
-				continue;
-			else if (!ret || entry->offset < ret->offset)
-				ret = entry;
+			if (fuzzy)
+				return entry;
+			else
+				return NULL;
 		}
 	}
 
-	return ret;
+	if (entry->bitmap) {
+		n = &entry->offset_index;
+		while (1) {
+			n = rb_prev(n);
+			if (!n)
+				break;
+			prev = rb_entry(n, struct btrfs_free_space,
+					offset_index);
+			if (!prev->bitmap) {
+				if (prev->offset + prev->bytes > offset)
+					return prev;
+				break;
+			}
+		}
+		if (entry->offset + BITS_PER_BITMAP *
+		    block_group->sectorsize > offset)
+			return entry;
+	} else if (entry->offset + entry->bytes > offset)
+		return entry;
+
+	if (!fuzzy)
+		return NULL;
+
+	while (1) {
+		if (entry->bitmap) {
+			if (entry->offset + BITS_PER_BITMAP *
+			    block_group->sectorsize > offset)
+				break;
+		} else {
+			if (entry->offset + entry->bytes > offset)
+				break;
+		}
+
+		n = rb_next(&entry->offset_index);
+		if (!n)
+			return NULL;
+		entry = rb_entry(n, struct btrfs_free_space, offset_index);
+	}
+	return entry;
 }
 
 static void unlink_free_space(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
-	rb_erase(&info->bytes_index, &block_group->free_space_bytes);
+	block_group->free_extents--;
+	block_group->free_space -= info->bytes;
 }
 
 static int link_free_space(struct btrfs_block_group_cache *block_group,
@@ -190,17 +246,353 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 {
 	int ret = 0;
 
-
-	BUG_ON(!info->bytes);
+	BUG_ON(!info->bitmap && !info->bytes);
 	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
-				 &info->offset_index);
+				 &info->offset_index, (info->bitmap != NULL));
 	if (ret)
 		return ret;
 
-	ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
-				&info->bytes_index);
-	if (ret)
-		return ret;
+	block_group->free_space += info->bytes;
+	block_group->free_extents++;
+	return ret;
+}
+
+static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+{
+	u64 max_bytes, possible_bytes;
+
+	/*
+	 * The goal is to keep the total amount of memory used per 1gb of space
+	 * at or below 32k, so we need to adjust how much memory we allow to be
+	 * used by extent based free space tracking
+	 */
+	max_bytes = MAX_CACHE_BYTES_PER_GIG *
+		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+
+	possible_bytes = (block_group->total_bitmaps * PAGE_CACHE_SIZE) +
+		(sizeof(struct btrfs_free_space) *
+		 block_group->extents_thresh);
+
+	if (possible_bytes > max_bytes) {
+		int extent_bytes = max_bytes -
+			(block_group->total_bitmaps * PAGE_CACHE_SIZE);
+
+		if (extent_bytes <= 0) {
+			block_group->extents_thresh = 0;
+			return;
+		}
+
+		block_group->extents_thresh = extent_bytes /
+			(sizeof(struct btrfs_free_space));
+	}
+}
+
+static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info, u64 offset,
+			      u64 bytes)
+{
+	unsigned long start, end;
+	unsigned long i;
+
+	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
+	end = start + bytes_to_bits(bytes, block_group->sectorsize);
+	BUG_ON(end > BITS_PER_BITMAP);
+
+	for (i = start; i < end; i++)
+		clear_bit(i, info->bitmap);
+
+	info->bytes -= bytes;
+	block_group->free_space -= bytes;
+}
+
+static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+			    struct btrfs_free_space *info, u64 offset,
+			    u64 bytes)
+{
+	unsigned long start, end;
+	unsigned long i;
+
+	start = offset_to_bit(info->offset, block_group->sectorsize, offset);
+	end = start + bytes_to_bits(bytes, block_group->sectorsize);
+	BUG_ON(end > BITS_PER_BITMAP);
+
+	for (i = start; i < end; i++)
+		set_bit(i, info->bitmap);
+
+	info->bytes += bytes;
+	block_group->free_space += bytes;
+}
+
+static int search_bitmap(struct btrfs_block_group_cache *block_group,
+			 struct btrfs_free_space *bitmap_info, u64 *offset,
+			 u64 *bytes)
+{
+	unsigned long found_bits = 0;
+	unsigned long bits, i;
+	unsigned long next_zero;
+
+	i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+			  max_t(u64, *offset, bitmap_info->offset));
+	bits = bytes_to_bits(*bytes, block_group->sectorsize);
+
+	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
+	     i < BITS_PER_BITMAP;
+	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
+		next_zero = find_next_zero_bit(bitmap_info->bitmap,
+					       BITS_PER_BITMAP, i);
+		if ((next_zero - i) >= bits) {
+			found_bits = next_zero - i;
+			break;
+		}
+		i = next_zero;
+	}
+
+	if (found_bits) {
+		*offset = (u64)(i * block_group->sectorsize) +
+			bitmap_info->offset;
+		*bytes = (u64)(found_bits) * block_group->sectorsize;
+		return 0;
+	}
+
+	return -1;
+}
+
+static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
+						*block_group, u64 *offset,
+						u64 *bytes, int debug)
+{
+	struct btrfs_free_space *entry;
+	struct rb_node *node;
+	int ret;
+
+	if (!block_group->free_space_offset.rb_node)
+		return NULL;
+
+	entry = tree_search_offset(block_group,
+				   offset_to_bitmap(block_group, *offset),
+				   0, 1);
+	if (!entry)
+		return NULL;
+
+	for (node = &entry->offset_index; node; node = rb_next(node)) {
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		if (entry->bytes < *bytes)
+			continue;
+
+		if (entry->bitmap) {
+			ret = search_bitmap(block_group, entry, offset, bytes);
+			if (!ret)
+				return entry;
+			continue;
+		}
+
+		*offset = entry->offset;
+		*bytes = entry->bytes;
+		return entry;
+	}
+
+	return NULL;
+}
+
+static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+			   struct btrfs_free_space *info, u64 offset)
+{
+	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+	int max_bitmaps = (int)div64_u64(block_group->key.offset +
+					 bytes_per_bg - 1, bytes_per_bg);
+	BUG_ON(block_group->total_bitmaps >= max_bitmaps);
+
+	info->offset = offset_to_bitmap(block_group, offset);
+	link_free_space(block_group, info);
+	block_group->total_bitmaps++;
+
+	recalculate_thresholds(block_group);
+}
+
+static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+				       struct btrfs_free_space *bitmap_info,
+				       u64 *offset, u64 *bytes)
+{
+	u64 end;
+	u64 search_start, search_bytes;
+	int ret;
+
+again:
+	end = bitmap_info->offset +
+		(u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+
+	/*
+	 * XXX - this can go away after a few releases.
+	 *
+	 * since the only user of btrfs_remove_free_space is the tree logging
+	 * stuff, and the only way to test that is under crash conditions, we
+	 * want to have this debug stuff here just in case somethings not
+	 * working. Search the bitmap for the space we are trying to use to
+	 * make sure its actually there. If its not there then we need to stop
+	 * because something has gone wrong.
+	 */
+	search_start = *offset;
+	search_bytes = *bytes;
+	ret = search_bitmap(block_group, bitmap_info, &search_start,
+			    &search_bytes);
+	BUG_ON(ret < 0 || search_start != *offset);
+
+	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
+		bitmap_clear_bits(block_group, bitmap_info, *offset,
+				  end - *offset + 1);
+		*bytes -= end - *offset + 1;
+		*offset = end + 1;
+	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
+		bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+		*bytes = 0;
+	}
+
+	if (*bytes) {
+		struct rb_node *next = rb_next(&bitmap_info->offset_index);
+		if (!bitmap_info->bytes) {
+			unlink_free_space(block_group, bitmap_info);
+			kfree(bitmap_info->bitmap);
+			kfree(bitmap_info);
+			block_group->total_bitmaps--;
+			recalculate_thresholds(block_group);
+		}
+
+		/*
+		 * no entry after this bitmap, but we still have bytes to
+		 * remove, so something has gone wrong.
+		 */
+		if (!next)
+			return -EINVAL;
+
+		bitmap_info = rb_entry(next, struct btrfs_free_space,
+				       offset_index);
+
+		/*
+		 * if the next entry isn't a bitmap we need to return to let the
+		 * extent stuff do its work.
+		 */
+		if (!bitmap_info->bitmap)
+			return -EAGAIN;
+
+		/*
+		 * Ok the next item is a bitmap, but it may not actually hold
+		 * the information for the rest of this free space stuff, so
+		 * look for it, and if we don't find it return so we can try
+		 * everything over again.
+		 */
+		search_start = *offset;
+		search_bytes = *bytes;
+		ret = search_bitmap(block_group, bitmap_info, &search_start,
+				    &search_bytes);
+		if (ret < 0 || search_start != *offset)
+			return -EAGAIN;
+
+		goto again;
+	} else if (!bitmap_info->bytes) {
+		unlink_free_space(block_group, bitmap_info);
+		kfree(bitmap_info->bitmap);
+		kfree(bitmap_info);
+		block_group->total_bitmaps--;
+		recalculate_thresholds(block_group);
+	}
+
+	return 0;
+}
+
+static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	struct btrfs_free_space *bitmap_info;
+	int added = 0;
+	u64 bytes, offset, end;
+	int ret;
+
+	/*
+	 * If we are below the extents threshold then we can add this as an
+	 * extent, and don't have to deal with the bitmap
+	 */
+	if (block_group->free_extents < block_group->extents_thresh &&
+	    info->bytes > block_group->sectorsize * 4)
+		return 0;
+
+	/*
+	 * some block groups are so tiny they can't be enveloped by a bitmap, so
+	 * don't even bother to create a bitmap for this
+	 */
+	if (BITS_PER_BITMAP * block_group->sectorsize >
+	    block_group->key.offset)
+		return 0;
+
+	bytes = info->bytes;
+	offset = info->offset;
+
+again:
+	bitmap_info = tree_search_offset(block_group,
+					 offset_to_bitmap(block_group, offset),
+					 1, 0);
+	if (!bitmap_info) {
+		BUG_ON(added);
+		goto new_bitmap;
+	}
+
+	end = bitmap_info->offset +
+		(u64)(BITS_PER_BITMAP * block_group->sectorsize);
+
+	if (offset >= bitmap_info->offset && offset + bytes > end) {
+		bitmap_set_bits(block_group, bitmap_info, offset,
+				end - offset);
+		bytes -= end - offset;
+		offset = end;
+		added = 0;
+	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
+		bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+		bytes = 0;
+	} else {
+		BUG();
+	}
+
+	if (!bytes) {
+		ret = 1;
+		goto out;
+	} else
+		goto again;
+
+new_bitmap:
+	if (info && info->bitmap) {
+		add_new_bitmap(block_group, info, offset);
+		added = 1;
+		info = NULL;
+		goto again;
+	} else {
+		spin_unlock(&block_group->tree_lock);
+
+		/* no pre-allocated info, allocate a new one */
+		if (!info) {
+			info = kzalloc(sizeof(struct btrfs_free_space),
+				       GFP_NOFS);
+			if (!info) {
+				spin_lock(&block_group->tree_lock);
+				ret = -ENOMEM;
+				goto out;
+			}
+		}
+
+		/* allocate the bitmap */
+		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+		spin_lock(&block_group->tree_lock);
+		if (!info->bitmap) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		goto again;
+	}
+
+out:
+	if (info) {
+		if (info->bitmap)
+			kfree(info->bitmap);
+		kfree(info);
+	}
 
 	return ret;
 }
@@ -208,8 +600,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			 u64 offset, u64 bytes)
 {
-	struct btrfs_free_space *right_info;
-	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info = NULL;
+	struct btrfs_free_space *left_info = NULL;
 	struct btrfs_free_space *info = NULL;
 	int ret = 0;
 
@@ -227,18 +619,38 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
-	right_info = tree_search_offset(&block_group->free_space_offset,
-					offset+bytes, 0, 0);
-	left_info = tree_search_offset(&block_group->free_space_offset,
-				       offset-1, 0, 1);
+	right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+	if (right_info && rb_prev(&right_info->offset_index))
+		left_info = rb_entry(rb_prev(&right_info->offset_index),
+				     struct btrfs_free_space, offset_index);
+	else
+		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+
+	/*
+	 * If there was no extent directly to the left or right of this new
+	 * extent then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	if ((!left_info || left_info->bitmap) &&
+	    (!right_info || right_info->bitmap)) {
+		ret = insert_into_bitmap(block_group, info);
+
+		if (ret < 0) {
+			goto out;
+		} else if (ret) {
+			ret = 0;
+			goto out;
+		}
+	}
 
-	if (right_info) {
+	if (right_info && !right_info->bitmap) {
 		unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
 	}
 
-	if (left_info && left_info->offset + left_info->bytes == offset) {
+	if (left_info && !left_info->bitmap &&
+	    left_info->offset + left_info->bytes == offset) {
 		unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
@@ -248,11 +660,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
-
+out:
 	spin_unlock(&block_group->tree_lock);
 
 	if (ret) {
-		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
+		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
 		BUG_ON(ret == -EEXIST);
 	}
 
@@ -263,40 +675,74 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			    u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
+	struct btrfs_free_space *next_info = NULL;
 	int ret = 0;
 
 	spin_lock(&block_group->tree_lock);
 
-	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
-				  1);
-	if (info && info->offset == offset) {
-		if (info->bytes < bytes) {
-			printk(KERN_ERR "Found free space at %llu, size %llu,"
-			       "trying to use %llu\n",
-			       (unsigned long long)info->offset,
-			       (unsigned long long)info->bytes,
-			       (unsigned long long)bytes);
+again:
+	info = tree_search_offset(block_group, offset, 0, 0);
+	if (!info) {
+		/*
+		 * oops didn't find an extent that matched the space we wanted
+		 * to remove, look for a bitmap instead
+		 */
+		info = tree_search_offset(block_group,
+					  offset_to_bitmap(block_group, offset),
+					  1, 0);
+		if (!info) {
+			WARN_ON(1);
+			goto out_lock;
+		}
+	}
+
+	if (info->bytes < bytes && rb_next(&info->offset_index)) {
+		u64 end;
+		next_info = rb_entry(rb_next(&info->offset_index),
+				     struct btrfs_free_space,
+				     offset_index);
+
+		if (next_info->bitmap)
+			end = next_info->offset + BITS_PER_BITMAP *
+				block_group->sectorsize - 1;
+		else
+			end = next_info->offset + next_info->bytes;
+
+		if (next_info->bytes < bytes ||
+		    next_info->offset > offset || offset > end) {
+			printk(KERN_CRIT "Found free space at %llu, size %llu,"
+			       " trying to use %llu\n",
+			       (unsigned long long)info->offset,
+			       (unsigned long long)info->bytes,
+			       (unsigned long long)bytes);
 			WARN_ON(1);
 			ret = -EINVAL;
-			spin_unlock(&block_group->tree_lock);
-			goto out;
+			goto out_lock;
 		}
-		unlink_free_space(block_group, info);
 
-		if (info->bytes == bytes) {
-			kfree(info);
-			spin_unlock(&block_group->tree_lock);
-			goto out;
+		info = next_info;
+	}
+
+	if (info->bytes == bytes) {
+		unlink_free_space(block_group, info);
+		if (info->bitmap) {
+			kfree(info->bitmap);
+			block_group->total_bitmaps--;
 		}
+		kfree(info);
+		goto out_lock;
+	}
 
+	if (!info->bitmap && info->offset == offset) {
+		unlink_free_space(block_group, info);
 		info->offset += bytes;
 		info->bytes -= bytes;
+		link_free_space(block_group, info);
+		goto out_lock;
+	}
 
-		ret = link_free_space(block_group, info);
-		spin_unlock(&block_group->tree_lock);
-		BUG_ON(ret);
-	} else if (info && info->offset < offset &&
-		   info->offset + info->bytes >= offset + bytes) {
+	if (!info->bitmap && info->offset <= offset &&
+	    info->offset + info->bytes >= offset + bytes) {
 		u64 old_start = info->offset;
 		/*
 		 * we're freeing space in the middle of the info,
@@ -312,7 +758,9 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			info->offset = offset + bytes;
 			info->bytes = old_end - info->offset;
 			ret = link_free_space(block_group, info);
-			BUG_ON(ret);
+			WARN_ON(ret);
+			if (ret)
+				goto out_lock;
 		} else {
 			/* the hole we're creating ends at the end
 			 * of the info struct, just free the info
@@ -320,32 +768,22 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 			kfree(info);
 		}
 		spin_unlock(&block_group->tree_lock);
-		/* step two, insert a new info struct to cover anything
-		 * before the hole
+
+		/* step two, insert a new info struct to cover
+		 * anything before the hole
 		 */
 		ret = btrfs_add_free_space(block_group, old_start,
 					   offset - old_start);
-		BUG_ON(ret);
-	} else {
-		spin_unlock(&block_group->tree_lock);
-		if (!info) {
-			printk(KERN_ERR "couldn't find space %llu to free\n",
-			       (unsigned long long)offset);
-			printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
-			       block_group->cached,
-			       (unsigned long long)block_group->key.objectid,
-			       (unsigned long long)block_group->key.offset);
-			btrfs_dump_free_space(block_group, bytes);
-		} else if (info) {
-			printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
-			       "but wanted offset=%llu bytes=%llu\n",
-			       (unsigned long long)info->offset,
-			       (unsigned long long)info->bytes,
-			       (unsigned long long)offset,
-			       (unsigned long long)bytes);
-		}
-		WARN_ON(1);
+		WARN_ON(ret);
+		goto out;
 	}
+
+	ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+	if (ret == -EAGAIN)
+		goto again;
+	BUG_ON(ret);
+out_lock:
+	spin_unlock(&block_group->tree_lock);
 out:
 	return ret;
 }
@@ -361,10 +799,13 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
 		if (info->bytes >= bytes)
 			count++;
-		printk(KERN_ERR "entry offset %llu, bytes %llu\n",
+		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
 		       (unsigned long long)info->offset,
-		       (unsigned long long)info->bytes);
+		       (unsigned long long)info->bytes,
+		       (info->bitmap) ? "yes" : "no");
 	}
+	printk(KERN_INFO "block group has cluster?: %s\n",
+	       list_empty(&block_group->cluster_list) ? "no" : "yes");
 	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
 	       "\n", count);
 }
@@ -397,26 +838,35 @@ __btrfs_return_cluster_to_free_space(
 {
 	struct btrfs_free_space *entry;
 	struct rb_node *node;
+	bool bitmap;
 
 	spin_lock(&cluster->lock);
 	if (cluster->block_group != block_group)
 		goto out;
 
+	bitmap = cluster->points_to_bitmap;
+	cluster->block_group = NULL;
 	cluster->window_start = 0;
+	list_del_init(&cluster->block_group_list);
+	cluster->points_to_bitmap = false;
+
+	if (bitmap)
+		goto out;
+
 	node = rb_first(&cluster->root);
-	while(node) {
+	while (node) {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
-		link_free_space(block_group, entry);
+		BUG_ON(entry->bitmap);
+		tree_insert_offset(&block_group->free_space_offset,
+				   entry->offset, &entry->offset_index, 0);
 	}
-	list_del_init(&cluster->block_group_list);
-
-	btrfs_put_block_group(cluster->block_group);
-	cluster->block_group = NULL;
 	cluster->root.rb_node = NULL;
+
 out:
 	spin_unlock(&cluster->lock);
+	btrfs_put_block_group(block_group);
 	return 0;
 }
 
@@ -425,20 +875,28 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 	struct btrfs_free_space *info;
 	struct rb_node *node;
 	struct btrfs_free_cluster *cluster;
-	struct btrfs_free_cluster *safe;
+	struct list_head *head;
 
 	spin_lock(&block_group->tree_lock);
-
-	list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
-				 block_group_list) {
+	while ((head = block_group->cluster_list.next) !=
+	       &block_group->cluster_list) {
+		cluster = list_entry(head, struct btrfs_free_cluster,
+				     block_group_list);
 
 		WARN_ON(cluster->block_group != block_group);
 		__btrfs_return_cluster_to_free_space(block_group, cluster);
+		if (need_resched()) {
+			spin_unlock(&block_group->tree_lock);
+			cond_resched();
+			spin_lock(&block_group->tree_lock);
+		}
 	}
 
-	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
-		info = rb_entry(node, struct btrfs_free_space, bytes_index);
+	while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
+		info = rb_entry(node, struct btrfs_free_space, offset_index);
 		unlink_free_space(block_group, info);
+		if (info->bitmap)
+			kfree(info->bitmap);
 		kfree(info);
 		if (need_resched()) {
 			spin_unlock(&block_group->tree_lock);
@@ -446,6 +904,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 			spin_lock(&block_group->tree_lock);
 		}
 	}
+
 	spin_unlock(&block_group->tree_lock);
 }
 
@@ -453,25 +912,35 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 			    u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space *entry = NULL;
+	u64 bytes_search = bytes + empty_size;
 	u64 ret = 0;
 
 	spin_lock(&block_group->tree_lock);
-	entry = tree_search_offset(&block_group->free_space_offset, offset,
-				   bytes + empty_size, 1);
+	entry = find_free_space(block_group, &offset, &bytes_search, 0);
 	if (!entry)
-		entry = tree_search_bytes(&block_group->free_space_bytes,
-					  offset, bytes + empty_size);
-	if (entry) {
+		goto out;
+
+	ret = offset;
+	if (entry->bitmap) {
+		bitmap_clear_bits(block_group, entry, offset, bytes);
+		if (!entry->bytes) {
+			unlink_free_space(block_group, entry);
+			kfree(entry->bitmap);
+			kfree(entry);
+			block_group->total_bitmaps--;
+			recalculate_thresholds(block_group);
+		}
+	} else {
 		unlink_free_space(block_group, entry);
-		ret = entry->offset;
 		entry->offset += bytes;
 		entry->bytes -= bytes;
-
 		if (!entry->bytes)
 			kfree(entry);
 		else
 			link_free_space(block_group, entry);
 	}
+
+out:
 	spin_unlock(&block_group->tree_lock);
 
 	return ret;
@@ -517,6 +986,54 @@ int btrfs_return_cluster_to_free_space(
 	return ret;
 }
 
+static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
+				   struct btrfs_free_cluster *cluster,
+				   u64 bytes, u64 min_start)
+{
+	struct btrfs_free_space *entry;
+	int err;
+	u64 search_start = cluster->window_start;
+	u64 search_bytes = bytes;
+	u64 ret = 0;
+
+	spin_lock(&block_group->tree_lock);
+	spin_lock(&cluster->lock);
+
+	if (!cluster->points_to_bitmap)
+		goto out;
+
+	if (cluster->block_group != block_group)
+		goto out;
+
+	/*
+	 * search_start is the beginning of the bitmap, but at some point it may
+	 * be a good idea to point to the actual start of the free area in the
+	 * bitmap, so do the offset_to_bitmap trick anyway, and set bitmap_only
+	 * to 1 to make sure we get the bitmap entry
+	 */
+	entry = tree_search_offset(block_group,
+				   offset_to_bitmap(block_group, search_start),
+				   1, 0);
+	if (!entry || !entry->bitmap)
+		goto out;
+
+	search_start = min_start;
+	search_bytes = bytes;
+
+	err = search_bitmap(block_group, entry, &search_start,
+			    &search_bytes);
+	if (err)
+		goto out;
+
+	ret = search_start;
+	bitmap_clear_bits(block_group, entry, ret, bytes);
+out:
+	spin_unlock(&cluster->lock);
+	spin_unlock(&block_group->tree_lock);
+
+	return ret;
+}
+
 /*
  * given a cluster, try to allocate 'bytes' from it, returns 0
  * if it couldn't find anything suitably large, or a logical disk offset
@@ -530,6 +1047,10 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 	struct rb_node *node;
 	u64 ret = 0;
 
+	if (cluster->points_to_bitmap)
+		return btrfs_alloc_from_bitmap(block_group, cluster, bytes,
+					       min_start);
+
 	spin_lock(&cluster->lock);
 	if (bytes > cluster->max_size)
 		goto out;
@@ -567,9 +1088,73 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 	}
 out:
 	spin_unlock(&cluster->lock);
+
 	return ret;
 }
 
+static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
+				struct btrfs_free_space *entry,
+				struct btrfs_free_cluster *cluster,
+				u64 offset, u64 bytes, u64 min_bytes)
+{
+	unsigned long next_zero;
+	unsigned long i;
+	unsigned long search_bits;
+	unsigned long total_bits;
+	unsigned long found_bits;
+	unsigned long start = 0;
+	unsigned long total_found = 0;
+	bool found = false;
+
+	i = offset_to_bit(entry->offset, block_group->sectorsize,
+			  max_t(u64, offset, entry->offset));
+	search_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+	total_bits = bytes_to_bits(bytes, block_group->sectorsize);
+
+again:
+	found_bits = 0;
+	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
+	     i < BITS_PER_BITMAP;
+	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
+		next_zero = find_next_zero_bit(entry->bitmap,
+					       BITS_PER_BITMAP, i);
+		if (next_zero - i >= search_bits) {
+			found_bits = next_zero - i;
+			break;
+		}
+		i = next_zero;
+	}
+
+	if (!found_bits)
+		return -1;
+
+	if (!found) {
+		start = i;
+		found = true;
+	}
+
+	total_found += found_bits;
+
+	if (cluster->max_size < found_bits * block_group->sectorsize)
+		cluster->max_size = found_bits * block_group->sectorsize;
+
+	if (total_found < total_bits) {
+		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
+		if (i - start > total_bits * 2) {
+			total_found = 0;
+			cluster->max_size = 0;
+			found = false;
+		}
+		goto again;
+	}
+
+	cluster->window_start = start * block_group->sectorsize +
+		entry->offset;
+	cluster->points_to_bitmap = true;
+
+	return 0;
+}
+
 /*
  * here we try to find a cluster of blocks in a block group. The goal
  * is to find at least bytes free and up to empty_size + bytes free.
@@ -587,12 +1172,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 	struct btrfs_free_space *entry = NULL;
 	struct rb_node *node;
 	struct btrfs_free_space *next;
-	struct btrfs_free_space *last;
+	struct btrfs_free_space *last = NULL;
 	u64 min_bytes;
 	u64 window_start;
 	u64 window_free;
 	u64 max_extent = 0;
-	int total_retries = 0;
+	bool found_bitmap = false;
 	int ret;
 
 	/* for metadata, allow allocates with more holes */
@@ -620,31 +1205,80 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 again:
-	min_bytes = min(min_bytes, bytes + empty_size);
-	entry = tree_search_bytes(&block_group->free_space_bytes,
-				  offset, min_bytes);
+	entry = tree_search_offset(block_group, offset, found_bitmap, 1);
 	if (!entry) {
 		ret = -ENOSPC;
 		goto out;
 	}
+
+	/*
+	 * If found_bitmap is true, we exhausted our search for extent entries,
+	 * and we just want to search all of the bitmaps that we can find, and
+	 * ignore any extent entries we find.
+	 */
+	while (entry->bitmap || found_bitmap ||
+	       (!entry->bitmap && entry->bytes < min_bytes)) {
+		struct rb_node *node = rb_next(&entry->offset_index);
+
+		if (entry->bitmap && entry->bytes > bytes + empty_size) {
+			ret = btrfs_bitmap_cluster(block_group, entry, cluster,
+						   offset, bytes + empty_size,
+						   min_bytes);
+			if (!ret)
+				goto got_it;
+		}
+
+		if (!node) {
+			ret = -ENOSPC;
+			goto out;
+		}
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+	}
+
+	/*
+	 * We already searched all the extent entries from the passed in offset
+	 * to the end and didn't find enough space for the cluster, and we also
+	 * didn't find any bitmaps that met our criteria, just go ahead and exit
+	 */
+	if (found_bitmap) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	cluster->points_to_bitmap = false;
 	window_start = entry->offset;
 	window_free = entry->bytes;
 	last = entry;
 	max_extent = entry->bytes;
 
-	while(1) {
+	while (1) {
 		/* out window is just right, lets fill it */
 		if (window_free >= bytes + empty_size)
 			break;
 
 		node = rb_next(&last->offset_index);
 		if (!node) {
+			if (found_bitmap)
+				goto again;
 			ret = -ENOSPC;
 			goto out;
 		}
 		next = rb_entry(node, struct btrfs_free_space, offset_index);
 
 		/*
+		 * we found a bitmap, so if this search doesn't result in a
+		 * cluster, we know to go and search again for the bitmaps and
+		 * start looking for space there
+		 */
+		if (next->bitmap) {
+			if (!found_bitmap)
+				offset = next->offset;
+			found_bitmap = true;
+			last = next;
+			continue;
+		}
+
+		/*
 		 * we haven't filled the empty size and the window is
 		 * very large. reset and try again
 		 */
@@ -655,19 +1289,6 @@ again:
 			window_free = entry->bytes;
 			last = entry;
 			max_extent = 0;
-			total_retries++;
-			if (total_retries % 64 == 0) {
-				if (min_bytes >= (bytes + empty_size)) {
-					ret = -ENOSPC;
-					goto out;
-				}
-				/*
-				 * grow our allocation a bit, we're not having
-				 * much luck
-				 */
-				min_bytes *= 2;
-				goto again;
-			}
 		} else {
 			last = next;
 			window_free += next->bytes;
@@ -685,11 +1306,19 @@ again:
 	 * The cluster includes an rbtree, but only uses the offset index
 	 * of each free space cache entry.
 	 */
-	while(1) {
+	while (1) {
 		node = rb_next(&entry->offset_index);
-		unlink_free_space(block_group, entry);
+		if (entry->bitmap && node) {
+			entry = rb_entry(node, struct btrfs_free_space,
+					 offset_index);
+			continue;
+		} else if (entry->bitmap && !node) {
+			break;
+		}
+
+		rb_erase(&entry->offset_index, &block_group->free_space_offset);
 		ret = tree_insert_offset(&cluster->root, entry->offset,
-					 &entry->offset_index);
+					 &entry->offset_index, 0);
 		BUG_ON(ret);
 
 		if (!node || entry == last)
@@ -697,8 +1326,10 @@ again:
 
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 	}
-	ret = 0;
+
 	cluster->max_size = max_extent;
+got_it:
+	ret = 0;
 	atomic_inc(&block_group->count);
 	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
 	cluster->block_group = block_group;
@@ -718,6 +1349,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 	spin_lock_init(&cluster->refill_lock);
 	cluster->root.rb_node = NULL;
 	cluster->max_size = 0;
+	cluster->points_to_bitmap = false;
 	INIT_LIST_HEAD(&cluster->block_group_list);
 	cluster->block_group = NULL;
 }