aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
authorArne Jansen <sensille@gmx.net>2011-09-12 06:22:57 -0400
committerJan Schmidt <list.btrfs@jan-o-sch.net>2012-07-10 09:14:44 -0400
commit709c0486b9fe9586736b108b7233bbce0300cfa5 (patch)
tree1e7ea470887983e6aa856d2a91cd46927b6536ea /fs/btrfs/extent-tree.c
parent416ac51da90e98daaac17e1f359a6c5591f7f5bd (diff)
Btrfs: Test code to change the order of delayed-ref processing
Normally delayed refs get processed in ascending bytenr order. This correlates in most cases to the order added. To expose dependencies on this order, we start to process the tree in the middle instead of the beginning. This code is only effective when SCRAMBLE_DELAYED_REFS is defined. Signed-off-by: Arne Jansen <sensille@gmx.net>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--fs/btrfs/extent-tree.c49
1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 94ce79f76e5f..b13f1fbc3733 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -34,6 +34,8 @@
34#include "locking.h" 34#include "locking.h"
35#include "free-space-cache.h" 35#include "free-space-cache.h"
36 36
37#undef SCRAMBLE_DELAYED_REFS
38
37/* 39/*
38 * control flags for do_chunk_alloc's force field 40 * control flags for do_chunk_alloc's force field
39 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk 41 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
@@ -2364,6 +2366,49 @@ static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
2364 spin_lock(&delayed_refs->lock); 2366 spin_lock(&delayed_refs->lock);
2365} 2367}
2366 2368
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning.
 *
 * Returns the bytenr of an entry near the median of @root, found by
 * alternating left/right descents from the rb-tree root. Returns 0 when the
 * tree is empty.
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle = 0;	/* fix: was uninitialized; returned as-is on empty tree */
	u64 first = 0, last = 0;

	/*
	 * first/last are computed but not otherwise used; kept as debug aids
	 * (this whole function is test-only, gated by SCRAMBLE_DELAYED_REFS).
	 */
	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		/* alternate descent direction to land near the median bytenr */
		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif
2411
2367/* 2412/*
2368 * this starts processing the delayed reference count updates and 2413 * this starts processing the delayed reference count updates and
2369 * extent insertions we have queued up so far. count can be 2414 * extent insertions we have queued up so far. count can be
@@ -2406,6 +2451,10 @@ again:
2406 consider_waiting = 0; 2451 consider_waiting = 0;
2407 spin_lock(&delayed_refs->lock); 2452 spin_lock(&delayed_refs->lock);
2408 2453
2454#ifdef SCRAMBLE_DELAYED_REFS
2455 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2456#endif
2457
2409 if (count == 0) { 2458 if (count == 0) {
2410 count = delayed_refs->num_entries * 2; 2459 count = delayed_refs->num_entries * 2;
2411 run_most = 1; 2460 run_most = 1;