author		Josef Bacik <jbacik@redhat.com>		2009-04-03 10:14:19 -0400
committer	Chris Mason <chris.mason@oracle.com>	2009-04-03 10:14:19 -0400
commit		70cb074345832b75cf422ed729706345511773b3
tree		37f23796751a20feff12f935568d4f5fff33b3fc
parent		bedf762ba3a4b70295661fa70c29c1f18fe0f351
Btrfs: free space cache cleanups
This patch cleans up the free space cache code a bit. It better documents the
idiosyncrasies of tree_search_offset and makes the code make a bit more sense.
I took out the info allocation at the start of __btrfs_add_free_space and put
it where it makes more sense; this was left over cruft from when alloc_mutex
existed. I also removed all of the re-searches we did to make sure we inserted
properly.

Signed-off-by: Josef Bacik <jbacik@redhat.com>
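The comment this patch adds to tree_search_offset() distinguishes a fuzzy lookup
(used when the caller only has an allocation hint) from an exact one. Below is a
rough userspace sketch of that distinction, using a plain sorted array instead
of the rbtree; struct range and search_offset() are invented names, and only the
fuzzy/exact semantics are meant to mirror the patch:

#include <stdio.h>
#include <stddef.h>

struct range {
	unsigned long long offset;
	unsigned long long bytes;
};

static const struct range *search_offset(const struct range *ranges, size_t n,
					 unsigned long long offset,
					 unsigned long long bytes, int fuzzy)
{
	const struct range *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		const struct range *r = &ranges[i];

		/* an exact hit that is big enough always wins */
		if (r->offset == offset && r->bytes >= bytes)
			return r;

		if (!fuzzy)
			continue;

		/* fuzzy: a chunk containing the hint wins immediately */
		if (r->offset < offset &&
		    r->offset + r->bytes - 1 >= offset && r->bytes >= bytes)
			return r;

		/* otherwise remember the closest chunk starting after the hint */
		if (r->offset > offset && r->bytes >= bytes &&
		    (!best || r->offset < best->offset))
			best = r;
	}
	return best;	/* NULL for an exact search that found nothing */
}

int main(void)
{
	const struct range ranges[] = { { 0, 64 }, { 128, 32 }, { 512, 256 } };

	/* an exact lookup at 100 finds nothing; a fuzzy one falls forward to 128 */
	printf("exact: %s\n", search_offset(ranges, 3, 100, 16, 0) ? "hit" : "miss");
	printf("fuzzy: %llu\n", search_offset(ranges, 3, 100, 16, 1)->offset);
	return 0;
}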
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/extent-tree.c		2
-rw-r--r--	fs/btrfs/free-space-cache.c	93
2 files changed, 44 insertions, 51 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f5e7cae63d80..2c214781fee6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -291,8 +291,8 @@ next:
 			block_group->key.objectid +
 			block_group->key.offset);
 
-	remove_sb_from_cache(root, block_group);
 	block_group->cached = 1;
+	remove_sb_from_cache(root, block_group);
 	ret = 0;
 err:
 	btrfs_free_path(path);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index d1e5f0e84c58..69b023ff6f72 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -68,14 +68,24 @@ static int tree_insert_bytes(struct rb_root *root, u64 bytes,
 }
 
 /*
- * searches the tree for the given offset.  If contains is set we will return
- * the free space that contains the given offset.  If contains is not set we
- * will return the free space that starts at or after the given offset and is
- * at least bytes long.
+ * searches the tree for the given offset.
+ *
+ * fuzzy == 1: this is used for allocations where we are given a hint of where
+ * to look for free space.  Because the hint may not be completely on an offset
+ * mark, or the hint may no longer point to free space we need to fudge our
+ * results a bit.  So we look for free space starting at or after offset with at
+ * least bytes size.  We prefer to find as close to the given offset as we can.
+ * Also if the offset is within a free space range, then we will return the free
+ * space that contains the given offset, which means we can return a free space
+ * chunk with an offset before the provided offset.
+ *
+ * fuzzy == 0: this is just a normal tree search.  Give us the free space that
+ * starts at the given offset which is at least bytes size, and if its not there
+ * return NULL.
  */
 static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
 						   u64 offset, u64 bytes,
-						   int contains)
+						   int fuzzy)
 {
 	struct rb_node *n = root->rb_node;
 	struct btrfs_free_space *entry, *ret = NULL;
@@ -84,13 +94,14 @@ static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
 		entry = rb_entry(n, struct btrfs_free_space, offset_index);
 
 		if (offset < entry->offset) {
-			if (!contains &&
+			if (fuzzy &&
 			    (!ret || entry->offset < ret->offset) &&
 			    (bytes <= entry->bytes))
 				ret = entry;
 			n = n->rb_left;
 		} else if (offset > entry->offset) {
-			if ((entry->offset + entry->bytes - 1) >= offset &&
+			if (fuzzy &&
+			    (entry->offset + entry->bytes - 1) >= offset &&
 			    bytes <= entry->bytes) {
 				ret = entry;
 				break;
@@ -190,55 +201,28 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *right_info;
 	struct btrfs_free_space *left_info;
 	struct btrfs_free_space *info = NULL;
-	struct btrfs_free_space *alloc_info;
 	int ret = 0;
 
-	alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!alloc_info)
-		return -ENOMEM;
-
 	/*
 	 * first we want to see if there is free space adjacent to the range we
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
 	right_info = tree_search_offset(&block_group->free_space_offset,
-					offset+bytes, 0, 1);
+					offset+bytes, 0, 0);
 	left_info = tree_search_offset(&block_group->free_space_offset,
 				       offset-1, 0, 1);
 
-	if (right_info && right_info->offset == offset+bytes) {
+	if (right_info) {
 		unlink_free_space(block_group, right_info);
 		info = right_info;
 		info->offset = offset;
 		info->bytes += bytes;
-	} else if (right_info && right_info->offset != offset+bytes) {
-		printk(KERN_ERR "btrfs adding space in the middle of an "
-		       "existing free space area. existing: "
-		       "offset=%llu, bytes=%llu. new: offset=%llu, "
-		       "bytes=%llu\n", (unsigned long long)right_info->offset,
-		       (unsigned long long)right_info->bytes,
-		       (unsigned long long)offset,
-		       (unsigned long long)bytes);
-		BUG();
 	}
 
-	if (left_info) {
+	if (left_info && left_info->offset + left_info->bytes == offset) {
 		unlink_free_space(block_group, left_info);
 
-		if (unlikely((left_info->offset + left_info->bytes) !=
-			     offset)) {
-			printk(KERN_ERR "btrfs free space to the left "
-			       "of new free space isn't "
-			       "quite right. existing: offset=%llu, "
-			       "bytes=%llu. new: offset=%llu, bytes=%llu\n",
-			       (unsigned long long)left_info->offset,
-			       (unsigned long long)left_info->bytes,
-			       (unsigned long long)offset,
-			       (unsigned long long)bytes);
-			BUG();
-		}
-
 		if (info) {
 			info->offset = left_info->offset;
 			info->bytes += left_info->bytes;
@@ -251,13 +235,15 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 
 	if (info) {
 		ret = link_free_space(block_group, info);
-		if (!ret)
-			info = NULL;
+		if (ret)
+			kfree(info);
 		goto out;
 	}
 
-	info = alloc_info;
-	alloc_info = NULL;
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
 	info->offset = offset;
 	info->bytes = bytes;
 
@@ -271,8 +257,6 @@ out:
 		BUG();
 	}
 
-	kfree(alloc_info);
-
 	return ret;
 }
 
@@ -283,6 +267,7 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 	struct btrfs_free_space *info;
 	int ret = 0;
 
+	BUG_ON(!block_group->cached);
 	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
 				  1);
 
@@ -341,6 +326,18 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 				  offset - old_start);
 		BUG_ON(ret);
 	} else {
+		if (!info) {
+			printk(KERN_ERR "couldn't find space %llu to free\n",
+			       (unsigned long long)offset);
+			printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
+			       block_group->cached, block_group->key.objectid,
+			       block_group->key.offset);
+			btrfs_dump_free_space(block_group, bytes);
+		} else if (info) {
+			printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
+			       "but wanted offset=%llu bytes=%llu\n",
+			       info->offset, info->bytes, offset, bytes);
+		}
 		WARN_ON(1);
 	}
 out:
@@ -351,12 +348,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			  u64 offset, u64 bytes)
 {
 	int ret;
-	struct btrfs_free_space *sp;
 
 	mutex_lock(&block_group->alloc_mutex);
 	ret = __btrfs_add_free_space(block_group, offset, bytes);
-	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
-	BUG_ON(!sp);
 	mutex_unlock(&block_group->alloc_mutex);
 
 	return ret;
@@ -366,11 +360,8 @@ int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
 			  u64 offset, u64 bytes)
 {
 	int ret;
-	struct btrfs_free_space *sp;
 
 	ret = __btrfs_add_free_space(block_group, offset, bytes);
-	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
-	BUG_ON(!sp);
 
 	return ret;
 }
@@ -408,6 +399,8 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
 		if (info->bytes >= bytes)
 			count++;
+		printk(KERN_ERR "entry offset %llu, bytes %llu\n", info->offset,
+		       info->bytes);
 	}
 	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
 	       "\n", count);
@@ -486,7 +479,7 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
 	struct btrfs_free_space *ret = NULL;
 
 	ret = tree_search_offset(&block_group->free_space_offset, offset,
-				 bytes, 0);
+				 bytes, 1);
 	if (!ret)
 		ret = tree_search_bytes(&block_group->free_space_bytes,
 					offset, bytes);
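For reference, the reshuffled allocation in __btrfs_add_free_space() shown above
amounts to the pattern sketched below: merge into an already-unlinked neighbour
when one is adjacent, allocate a new entry only when no neighbour could absorb
the range, and free the entry again if linking it fails. This is a userspace
illustration only; space_entry, link_entry and add_range are made-up names, not
btrfs interfaces.

#include <stdio.h>
#include <stdlib.h>

struct space_entry {
	unsigned long long offset;
	unsigned long long bytes;
};

/* made-up stand-in for link_free_space(); returns 0 on success */
static int link_entry(struct space_entry *entry)
{
	printf("linked %llu bytes at %llu\n", entry->bytes, entry->offset);
	return 0;
}

/*
 * merged points at an already-unlinked adjacent entry that absorbed the new
 * range (so it is owned here), or is NULL when nothing was adjacent.
 */
static int add_range(struct space_entry *merged,
		     unsigned long long offset, unsigned long long bytes)
{
	struct space_entry *info = merged;
	int ret;

	if (!info) {
		/* allocate only once we know no neighbour can be reused */
		info = calloc(1, sizeof(*info));
		if (!info)
			return -1;
		info->offset = offset;
		info->bytes = bytes;
	}

	ret = link_entry(info);
	if (ret)
		free(info);	/* clean up on failure instead of pre-allocating */
	return ret;
}

int main(void)
{
	return add_range(NULL, 4096, 8192);
}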