diff options
author | Kent Overstreet <kmo@daterainc.com> | 2013-10-24 19:36:03 -0400 |
---|---|---|
committer | Kent Overstreet <kmo@daterainc.com> | 2013-11-11 00:56:34 -0500 |
commit | 280481d06c8a683d9aaa26125476222e76b733c5 (patch) | |
tree | 513b7387da60b3d497a108335f743369106eb7a3 /drivers/md | |
parent | e58ff155034791ed3a5563d24a50fae0a8c1617c (diff) |
bcache: Debug code improvements
Couple changes:
* Consolidate bch_check_keys() and bch_check_key_order(), and move the
checks that only check_key_order() could do to bch_btree_iter_next().
* Get rid of CONFIG_BCACHE_EDEBUG - now, all that code is compiled in
when CONFIG_BCACHE_DEBUG is enabled, and there's now a sysfs file to
flip on the EDEBUG checks at runtime.
* Dropped an old not terribly useful check in rw_unlock(), and
refactored/improved some of the other debug code.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bcache/Kconfig | 11 | ||||
-rw-r--r-- | drivers/md/bcache/alloc.c | 5 | ||||
-rw-r--r-- | drivers/md/bcache/bcache.h | 10 | ||||
-rw-r--r-- | drivers/md/bcache/bset.c | 112 | ||||
-rw-r--r-- | drivers/md/bcache/bset.h | 3 | ||||
-rw-r--r-- | drivers/md/bcache/btree.c | 8 | ||||
-rw-r--r-- | drivers/md/bcache/btree.h | 8 | ||||
-rw-r--r-- | drivers/md/bcache/debug.c | 136 | ||||
-rw-r--r-- | drivers/md/bcache/debug.h | 46 | ||||
-rw-r--r-- | drivers/md/bcache/sysfs.c | 5 | ||||
-rw-r--r-- | drivers/md/bcache/util.h | 4 |
11 files changed, 162 insertions, 186 deletions
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index f950c9d29f3e..2638417b19aa 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig | |||
@@ -13,15 +13,8 @@ config BCACHE_DEBUG | |||
13 | ---help--- | 13 | ---help--- |
14 | Don't select this option unless you're a developer | 14 | Don't select this option unless you're a developer |
15 | 15 | ||
16 | Enables extra debugging tools (primarily a fuzz tester) | 16 | Enables extra debugging tools, allows expensive runtime checks to be |
17 | 17 | turned on. | |
18 | config BCACHE_EDEBUG | ||
19 | bool "Extended runtime checks" | ||
20 | depends on BCACHE | ||
21 | ---help--- | ||
22 | Don't select this option unless you're a developer | ||
23 | |||
24 | Enables extra runtime checks which significantly affect performance | ||
25 | 18 | ||
26 | config BCACHE_CLOSURES_DEBUG | 19 | config BCACHE_CLOSURES_DEBUG |
27 | bool "Debug closures" | 20 | bool "Debug closures" |
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 4970ddc6a7f6..ed5920b20c61 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c | |||
@@ -398,8 +398,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, bool wait) | |||
398 | out: | 398 | out: |
399 | wake_up_process(ca->alloc_thread); | 399 | wake_up_process(ca->alloc_thread); |
400 | 400 | ||
401 | #ifdef CONFIG_BCACHE_EDEBUG | 401 | if (expensive_debug_checks(ca->set)) { |
402 | { | ||
403 | size_t iter; | 402 | size_t iter; |
404 | long i; | 403 | long i; |
405 | 404 | ||
@@ -413,7 +412,7 @@ out: | |||
413 | fifo_for_each(i, &ca->unused, iter) | 412 | fifo_for_each(i, &ca->unused, iter) |
414 | BUG_ON(i == r); | 413 | BUG_ON(i == r); |
415 | } | 414 | } |
416 | #endif | 415 | |
417 | b = ca->buckets + r; | 416 | b = ca->buckets + r; |
418 | 417 | ||
419 | BUG_ON(atomic_read(&b->pin) != 1); | 418 | BUG_ON(atomic_read(&b->pin) != 1); |
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 045cb99f1ca6..d03bc6f66493 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
@@ -690,6 +690,7 @@ struct cache_set { | |||
690 | unsigned short journal_delay_ms; | 690 | unsigned short journal_delay_ms; |
691 | unsigned verify:1; | 691 | unsigned verify:1; |
692 | unsigned key_merging_disabled:1; | 692 | unsigned key_merging_disabled:1; |
693 | unsigned expensive_debug_checks:1; | ||
693 | unsigned gc_always_rewrite:1; | 694 | unsigned gc_always_rewrite:1; |
694 | unsigned shrinker_disabled:1; | 695 | unsigned shrinker_disabled:1; |
695 | unsigned copy_gc_enabled:1; | 696 | unsigned copy_gc_enabled:1; |
@@ -698,15 +699,6 @@ struct cache_set { | |||
698 | struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; | 699 | struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; |
699 | }; | 700 | }; |
700 | 701 | ||
701 | static inline bool key_merging_disabled(struct cache_set *c) | ||
702 | { | ||
703 | #ifdef CONFIG_BCACHE_DEBUG | ||
704 | return c->key_merging_disabled; | ||
705 | #else | ||
706 | return 0; | ||
707 | #endif | ||
708 | } | ||
709 | |||
710 | struct bbio { | 702 | struct bbio { |
711 | unsigned submit_time_us; | 703 | unsigned submit_time_us; |
712 | union { | 704 | union { |
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index f32216c75948..6bffde478926 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c | |||
@@ -106,6 +106,43 @@ bad: | |||
106 | return true; | 106 | return true; |
107 | } | 107 | } |
108 | 108 | ||
109 | static bool ptr_bad_expensive_checks(struct btree *b, const struct bkey *k, | ||
110 | unsigned ptr) | ||
111 | { | ||
112 | struct bucket *g = PTR_BUCKET(b->c, k, ptr); | ||
113 | char buf[80]; | ||
114 | |||
115 | if (mutex_trylock(&b->c->bucket_lock)) { | ||
116 | if (b->level) { | ||
117 | if (KEY_DIRTY(k) || | ||
118 | g->prio != BTREE_PRIO || | ||
119 | (b->c->gc_mark_valid && | ||
120 | GC_MARK(g) != GC_MARK_METADATA)) | ||
121 | goto err; | ||
122 | |||
123 | } else { | ||
124 | if (g->prio == BTREE_PRIO) | ||
125 | goto err; | ||
126 | |||
127 | if (KEY_DIRTY(k) && | ||
128 | b->c->gc_mark_valid && | ||
129 | GC_MARK(g) != GC_MARK_DIRTY) | ||
130 | goto err; | ||
131 | } | ||
132 | mutex_unlock(&b->c->bucket_lock); | ||
133 | } | ||
134 | |||
135 | return false; | ||
136 | err: | ||
137 | mutex_unlock(&b->c->bucket_lock); | ||
138 | bch_bkey_to_text(buf, sizeof(buf), k); | ||
139 | btree_bug(b, | ||
140 | "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", | ||
141 | buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), | ||
142 | g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); | ||
143 | return true; | ||
144 | } | ||
145 | |||
109 | bool bch_ptr_bad(struct btree *b, const struct bkey *k) | 146 | bool bch_ptr_bad(struct btree *b, const struct bkey *k) |
110 | { | 147 | { |
111 | struct bucket *g; | 148 | struct bucket *g; |
@@ -133,46 +170,12 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k) | |||
133 | if (stale) | 170 | if (stale) |
134 | return true; | 171 | return true; |
135 | 172 | ||
136 | #ifdef CONFIG_BCACHE_EDEBUG | 173 | if (expensive_debug_checks(b->c) && |
137 | if (!mutex_trylock(&b->c->bucket_lock)) | 174 | ptr_bad_expensive_checks(b, k, i)) |
138 | continue; | 175 | return true; |
139 | |||
140 | if (b->level) { | ||
141 | if (KEY_DIRTY(k) || | ||
142 | g->prio != BTREE_PRIO || | ||
143 | (b->c->gc_mark_valid && | ||
144 | GC_MARK(g) != GC_MARK_METADATA)) | ||
145 | goto bug; | ||
146 | |||
147 | } else { | ||
148 | if (g->prio == BTREE_PRIO) | ||
149 | goto bug; | ||
150 | |||
151 | if (KEY_DIRTY(k) && | ||
152 | b->c->gc_mark_valid && | ||
153 | GC_MARK(g) != GC_MARK_DIRTY) | ||
154 | goto bug; | ||
155 | } | ||
156 | mutex_unlock(&b->c->bucket_lock); | ||
157 | #endif | ||
158 | } | 176 | } |
159 | 177 | ||
160 | return false; | 178 | return false; |
161 | #ifdef CONFIG_BCACHE_EDEBUG | ||
162 | bug: | ||
163 | mutex_unlock(&b->c->bucket_lock); | ||
164 | |||
165 | { | ||
166 | char buf[80]; | ||
167 | |||
168 | bch_bkey_to_text(buf, sizeof(buf), k); | ||
169 | btree_bug(b, | ||
170 | "inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", | ||
171 | buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), | ||
172 | g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); | ||
173 | } | ||
174 | return true; | ||
175 | #endif | ||
176 | } | 179 | } |
177 | 180 | ||
178 | /* Key/pointer manipulation */ | 181 | /* Key/pointer manipulation */ |
@@ -821,16 +824,16 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t, | |||
821 | } else | 824 | } else |
822 | i = bset_search_write_set(b, t, search); | 825 | i = bset_search_write_set(b, t, search); |
823 | 826 | ||
824 | #ifdef CONFIG_BCACHE_EDEBUG | 827 | if (expensive_debug_checks(b->c)) { |
825 | BUG_ON(bset_written(b, t) && | 828 | BUG_ON(bset_written(b, t) && |
826 | i.l != t->data->start && | 829 | i.l != t->data->start && |
827 | bkey_cmp(tree_to_prev_bkey(t, | 830 | bkey_cmp(tree_to_prev_bkey(t, |
828 | inorder_to_tree(bkey_to_cacheline(t, i.l), t)), | 831 | inorder_to_tree(bkey_to_cacheline(t, i.l), t)), |
829 | search) > 0); | 832 | search) > 0); |
830 | 833 | ||
831 | BUG_ON(i.r != end(t->data) && | 834 | BUG_ON(i.r != end(t->data) && |
832 | bkey_cmp(i.r, search) <= 0); | 835 | bkey_cmp(i.r, search) <= 0); |
833 | #endif | 836 | } |
834 | 837 | ||
835 | while (likely(i.l != i.r) && | 838 | while (likely(i.l != i.r) && |
836 | bkey_cmp(i.l, search) <= 0) | 839 | bkey_cmp(i.l, search) <= 0) |
@@ -871,12 +874,16 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, | |||
871 | } | 874 | } |
872 | 875 | ||
873 | struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter, | 876 | struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter, |
874 | struct bkey *search, struct bset_tree *start) | 877 | struct bkey *search, struct bset_tree *start) |
875 | { | 878 | { |
876 | struct bkey *ret = NULL; | 879 | struct bkey *ret = NULL; |
877 | iter->size = ARRAY_SIZE(iter->data); | 880 | iter->size = ARRAY_SIZE(iter->data); |
878 | iter->used = 0; | 881 | iter->used = 0; |
879 | 882 | ||
883 | #ifdef CONFIG_BCACHE_DEBUG | ||
884 | iter->b = b; | ||
885 | #endif | ||
886 | |||
880 | for (; start <= &b->sets[b->nsets]; start++) { | 887 | for (; start <= &b->sets[b->nsets]; start++) { |
881 | ret = bch_bset_search(b, start, search); | 888 | ret = bch_bset_search(b, start, search); |
882 | bch_btree_iter_push(iter, ret, end(start->data)); | 889 | bch_btree_iter_push(iter, ret, end(start->data)); |
@@ -891,6 +898,8 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter) | |||
891 | struct bkey *ret = NULL; | 898 | struct bkey *ret = NULL; |
892 | 899 | ||
893 | if (!btree_iter_end(iter)) { | 900 | if (!btree_iter_end(iter)) { |
901 | bch_btree_iter_next_check(iter); | ||
902 | |||
894 | ret = iter->data->k; | 903 | ret = iter->data->k; |
895 | iter->data->k = bkey_next(iter->data->k); | 904 | iter->data->k = bkey_next(iter->data->k); |
896 | 905 | ||
@@ -1002,7 +1011,6 @@ static void btree_mergesort(struct btree *b, struct bset *out, | |||
1002 | out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0; | 1011 | out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0; |
1003 | 1012 | ||
1004 | pr_debug("sorted %i keys", out->keys); | 1013 | pr_debug("sorted %i keys", out->keys); |
1005 | bch_check_key_order(b, out); | ||
1006 | } | 1014 | } |
1007 | 1015 | ||
1008 | static void __btree_sort(struct btree *b, struct btree_iter *iter, | 1016 | static void __btree_sort(struct btree *b, struct btree_iter *iter, |
@@ -1063,15 +1071,15 @@ static void __btree_sort(struct btree *b, struct btree_iter *iter, | |||
1063 | 1071 | ||
1064 | void bch_btree_sort_partial(struct btree *b, unsigned start) | 1072 | void bch_btree_sort_partial(struct btree *b, unsigned start) |
1065 | { | 1073 | { |
1066 | size_t oldsize = 0, order = b->page_order, keys = 0; | 1074 | size_t order = b->page_order, keys = 0; |
1067 | struct btree_iter iter; | 1075 | struct btree_iter iter; |
1076 | int oldsize = bch_count_data(b); | ||
1077 | |||
1068 | __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]); | 1078 | __bch_btree_iter_init(b, &iter, NULL, &b->sets[start]); |
1069 | 1079 | ||
1070 | BUG_ON(b->sets[b->nsets].data == write_block(b) && | 1080 | BUG_ON(b->sets[b->nsets].data == write_block(b) && |
1071 | (b->sets[b->nsets].size || b->nsets)); | 1081 | (b->sets[b->nsets].size || b->nsets)); |
1072 | 1082 | ||
1073 | if (b->written) | ||
1074 | oldsize = bch_count_data(b); | ||
1075 | 1083 | ||
1076 | if (start) { | 1084 | if (start) { |
1077 | unsigned i; | 1085 | unsigned i; |
@@ -1087,7 +1095,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start) | |||
1087 | 1095 | ||
1088 | __btree_sort(b, &iter, start, order, false); | 1096 | __btree_sort(b, &iter, start, order, false); |
1089 | 1097 | ||
1090 | EBUG_ON(b->written && bch_count_data(b) != oldsize); | 1098 | EBUG_ON(b->written && oldsize >= 0 && bch_count_data(b) != oldsize); |
1091 | } | 1099 | } |
1092 | 1100 | ||
1093 | void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter) | 1101 | void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter) |
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index 5cd90565dfe2..a043a92d4dc9 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h | |||
@@ -148,6 +148,9 @@ | |||
148 | 148 | ||
149 | struct btree_iter { | 149 | struct btree_iter { |
150 | size_t size, used; | 150 | size_t size, used; |
151 | #ifdef CONFIG_BCACHE_DEBUG | ||
152 | struct btree *b; | ||
153 | #endif | ||
151 | struct btree_iter_set { | 154 | struct btree_iter_set { |
152 | struct bkey *k, *end; | 155 | struct bkey *k, *end; |
153 | } data[MAX_BSETS]; | 156 | } data[MAX_BSETS]; |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index aba787d954e5..fa4d0b1f6d75 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c | |||
@@ -216,6 +216,10 @@ static void bch_btree_node_read_done(struct btree *b) | |||
216 | iter->size = b->c->sb.bucket_size / b->c->sb.block_size; | 216 | iter->size = b->c->sb.bucket_size / b->c->sb.block_size; |
217 | iter->used = 0; | 217 | iter->used = 0; |
218 | 218 | ||
219 | #ifdef CONFIG_BCACHE_DEBUG | ||
220 | iter->b = b; | ||
221 | #endif | ||
222 | |||
219 | if (!i->seq) | 223 | if (!i->seq) |
220 | goto err; | 224 | goto err; |
221 | 225 | ||
@@ -454,7 +458,7 @@ void bch_btree_node_write(struct btree *b, struct closure *parent) | |||
454 | BUG_ON(b->written >= btree_blocks(b)); | 458 | BUG_ON(b->written >= btree_blocks(b)); |
455 | BUG_ON(b->written && !i->keys); | 459 | BUG_ON(b->written && !i->keys); |
456 | BUG_ON(b->sets->data->seq != i->seq); | 460 | BUG_ON(b->sets->data->seq != i->seq); |
457 | bch_check_key_order(b, i); | 461 | bch_check_keys(b, "writing"); |
458 | 462 | ||
459 | cancel_delayed_work(&b->work); | 463 | cancel_delayed_work(&b->work); |
460 | 464 | ||
@@ -1917,7 +1921,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op, | |||
1917 | struct bkey *replace_key) | 1921 | struct bkey *replace_key) |
1918 | { | 1922 | { |
1919 | bool ret = false; | 1923 | bool ret = false; |
1920 | unsigned oldsize = bch_count_data(b); | 1924 | int oldsize = bch_count_data(b); |
1921 | 1925 | ||
1922 | while (!bch_keylist_empty(insert_keys)) { | 1926 | while (!bch_keylist_empty(insert_keys)) { |
1923 | struct bset *i = write_block(b); | 1927 | struct bset *i = write_block(b); |
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 8fc1e8925399..27e90b189112 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h | |||
@@ -259,14 +259,6 @@ static inline void rw_lock(bool w, struct btree *b, int level) | |||
259 | 259 | ||
260 | static inline void rw_unlock(bool w, struct btree *b) | 260 | static inline void rw_unlock(bool w, struct btree *b) |
261 | { | 261 | { |
262 | #ifdef CONFIG_BCACHE_EDEBUG | ||
263 | unsigned i; | ||
264 | |||
265 | if (w && b->key.ptr[0]) | ||
266 | for (i = 0; i <= b->nsets; i++) | ||
267 | bch_check_key_order(b, b->sets[i].data); | ||
268 | #endif | ||
269 | |||
270 | if (w) | 262 | if (w) |
271 | b->seq++; | 263 | b->seq++; |
272 | (w ? up_write : up_read)(&b->lock); | 264 | (w ? up_write : up_read)(&b->lock); |
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index d9ccb3169aa2..e99e6b8852b2 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c | |||
@@ -76,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k) | |||
76 | return out - buf; | 76 | return out - buf; |
77 | } | 77 | } |
78 | 78 | ||
79 | int bch_btree_to_text(char *buf, size_t size, const struct btree *b) | 79 | #ifdef CONFIG_BCACHE_DEBUG |
80 | { | ||
81 | return scnprintf(buf, size, "%zu level %i/%i", | ||
82 | PTR_BUCKET_NR(b->c, &b->key, 0), | ||
83 | b->level, b->c->root ? b->c->root->level : -1); | ||
84 | } | ||
85 | |||
86 | #if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) | ||
87 | |||
88 | static bool skipped_backwards(struct btree *b, struct bkey *k) | ||
89 | { | ||
90 | return bkey_cmp(k, (!b->level) | ||
91 | ? &START_KEY(bkey_next(k)) | ||
92 | : bkey_next(k)) > 0; | ||
93 | } | ||
94 | 80 | ||
95 | static void dump_bset(struct btree *b, struct bset *i) | 81 | static void dump_bset(struct btree *b, struct bset *i) |
96 | { | 82 | { |
97 | struct bkey *k; | 83 | struct bkey *k, *next; |
98 | unsigned j; | 84 | unsigned j; |
99 | char buf[80]; | 85 | char buf[80]; |
100 | 86 | ||
101 | for (k = i->start; k < end(i); k = bkey_next(k)) { | 87 | for (k = i->start; k < end(i); k = next) { |
88 | next = bkey_next(k); | ||
89 | |||
102 | bch_bkey_to_text(buf, sizeof(buf), k); | 90 | bch_bkey_to_text(buf, sizeof(buf), k); |
103 | printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), | 91 | printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), |
104 | (uint64_t *) k - i->d, i->keys, buf); | 92 | (uint64_t *) k - i->d, i->keys, buf); |
@@ -114,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i) | |||
114 | 102 | ||
115 | printk(" %s\n", bch_ptr_status(b->c, k)); | 103 | printk(" %s\n", bch_ptr_status(b->c, k)); |
116 | 104 | ||
117 | if (bkey_next(k) < end(i) && | 105 | if (next < end(i) && |
118 | skipped_backwards(b, k)) | 106 | bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0) |
119 | printk(KERN_ERR "Key skipped backwards\n"); | 107 | printk(KERN_ERR "Key skipped backwards\n"); |
120 | } | 108 | } |
121 | } | 109 | } |
122 | 110 | ||
123 | #endif | 111 | static void bch_dump_bucket(struct btree *b) |
112 | { | ||
113 | unsigned i; | ||
124 | 114 | ||
125 | #ifdef CONFIG_BCACHE_DEBUG | 115 | console_lock(); |
116 | for (i = 0; i <= b->nsets; i++) | ||
117 | dump_bset(b, b->sets[i].data); | ||
118 | console_unlock(); | ||
119 | } | ||
126 | 120 | ||
127 | void bch_btree_verify(struct btree *b, struct bset *new) | 121 | void bch_btree_verify(struct btree *b, struct bset *new) |
128 | { | 122 | { |
@@ -211,11 +205,7 @@ out_put: | |||
211 | bio_put(check); | 205 | bio_put(check); |
212 | } | 206 | } |
213 | 207 | ||
214 | #endif | 208 | int __bch_count_data(struct btree *b) |
215 | |||
216 | #ifdef CONFIG_BCACHE_EDEBUG | ||
217 | |||
218 | unsigned bch_count_data(struct btree *b) | ||
219 | { | 209 | { |
220 | unsigned ret = 0; | 210 | unsigned ret = 0; |
221 | struct btree_iter iter; | 211 | struct btree_iter iter; |
@@ -227,72 +217,60 @@ unsigned bch_count_data(struct btree *b) | |||
227 | return ret; | 217 | return ret; |
228 | } | 218 | } |
229 | 219 | ||
230 | static void vdump_bucket_and_panic(struct btree *b, const char *fmt, | 220 | void __bch_check_keys(struct btree *b, const char *fmt, ...) |
231 | va_list args) | ||
232 | { | ||
233 | unsigned i; | ||
234 | char buf[80]; | ||
235 | |||
236 | console_lock(); | ||
237 | |||
238 | for (i = 0; i <= b->nsets; i++) | ||
239 | dump_bset(b, b->sets[i].data); | ||
240 | |||
241 | vprintk(fmt, args); | ||
242 | |||
243 | console_unlock(); | ||
244 | |||
245 | bch_btree_to_text(buf, sizeof(buf), b); | ||
246 | panic("at %s\n", buf); | ||
247 | } | ||
248 | |||
249 | void bch_check_key_order_msg(struct btree *b, struct bset *i, | ||
250 | const char *fmt, ...) | ||
251 | { | ||
252 | struct bkey *k; | ||
253 | |||
254 | if (!i->keys) | ||
255 | return; | ||
256 | |||
257 | for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k)) | ||
258 | if (skipped_backwards(b, k)) { | ||
259 | va_list args; | ||
260 | va_start(args, fmt); | ||
261 | |||
262 | vdump_bucket_and_panic(b, fmt, args); | ||
263 | va_end(args); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | void bch_check_keys(struct btree *b, const char *fmt, ...) | ||
268 | { | 221 | { |
269 | va_list args; | 222 | va_list args; |
270 | struct bkey *k, *p = NULL; | 223 | struct bkey *k, *p = NULL; |
271 | struct btree_iter iter; | 224 | struct btree_iter iter; |
272 | 225 | const char *err; | |
273 | if (b->level) | ||
274 | return; | ||
275 | 226 | ||
276 | for_each_key(b, k, &iter) { | 227 | for_each_key(b, k, &iter) { |
277 | if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) { | 228 | if (!b->level) { |
278 | printk(KERN_ERR "Keys out of order:\n"); | 229 | err = "Keys out of order"; |
279 | goto bug; | 230 | if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) |
280 | } | 231 | goto bug; |
281 | 232 | ||
282 | if (bch_ptr_invalid(b, k)) | 233 | if (bch_ptr_invalid(b, k)) |
283 | continue; | 234 | continue; |
284 | 235 | ||
285 | if (p && bkey_cmp(p, &START_KEY(k)) > 0) { | 236 | err = "Overlapping keys"; |
286 | printk(KERN_ERR "Overlapping keys:\n"); | 237 | if (p && bkey_cmp(p, &START_KEY(k)) > 0) |
287 | goto bug; | 238 | goto bug; |
239 | } else { | ||
240 | if (bch_ptr_bad(b, k)) | ||
241 | continue; | ||
242 | |||
243 | err = "Duplicate keys"; | ||
244 | if (p && !bkey_cmp(p, k)) | ||
245 | goto bug; | ||
288 | } | 246 | } |
289 | p = k; | 247 | p = k; |
290 | } | 248 | } |
249 | |||
250 | err = "Key larger than btree node key"; | ||
251 | if (p && bkey_cmp(p, &b->key) > 0) | ||
252 | goto bug; | ||
253 | |||
291 | return; | 254 | return; |
292 | bug: | 255 | bug: |
256 | bch_dump_bucket(b); | ||
257 | |||
293 | va_start(args, fmt); | 258 | va_start(args, fmt); |
294 | vdump_bucket_and_panic(b, fmt, args); | 259 | vprintk(fmt, args); |
295 | va_end(args); | 260 | va_end(args); |
261 | |||
262 | panic("bcache error: %s:\n", err); | ||
263 | } | ||
264 | |||
265 | void bch_btree_iter_next_check(struct btree_iter *iter) | ||
266 | { | ||
267 | struct bkey *k = iter->data->k, *next = bkey_next(k); | ||
268 | |||
269 | if (next < iter->data->end && | ||
270 | bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) { | ||
271 | bch_dump_bucket(iter->b); | ||
272 | panic("Key skipped backwards\n"); | ||
273 | } | ||
296 | } | 274 | } |
297 | 275 | ||
298 | #endif | 276 | #endif |
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h index 0f4b3440512c..7914ba0ff316 100644 --- a/drivers/md/bcache/debug.h +++ b/drivers/md/bcache/debug.h | |||
@@ -4,40 +4,42 @@ | |||
4 | /* Btree/bkey debug printing */ | 4 | /* Btree/bkey debug printing */ |
5 | 5 | ||
6 | int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k); | 6 | int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k); |
7 | int bch_btree_to_text(char *buf, size_t size, const struct btree *b); | ||
8 | |||
9 | #ifdef CONFIG_BCACHE_EDEBUG | ||
10 | |||
11 | unsigned bch_count_data(struct btree *); | ||
12 | void bch_check_key_order_msg(struct btree *, struct bset *, const char *, ...); | ||
13 | void bch_check_keys(struct btree *, const char *, ...); | ||
14 | |||
15 | #define bch_check_key_order(b, i) \ | ||
16 | bch_check_key_order_msg(b, i, "keys out of order") | ||
17 | #define EBUG_ON(cond) BUG_ON(cond) | ||
18 | |||
19 | #else /* EDEBUG */ | ||
20 | |||
21 | #define bch_count_data(b) 0 | ||
22 | #define bch_check_key_order(b, i) do {} while (0) | ||
23 | #define bch_check_key_order_msg(b, i, ...) do {} while (0) | ||
24 | #define bch_check_keys(b, ...) do {} while (0) | ||
25 | #define EBUG_ON(cond) do {} while (0) | ||
26 | |||
27 | #endif | ||
28 | 7 | ||
29 | #ifdef CONFIG_BCACHE_DEBUG | 8 | #ifdef CONFIG_BCACHE_DEBUG |
30 | 9 | ||
31 | void bch_btree_verify(struct btree *, struct bset *); | 10 | void bch_btree_verify(struct btree *, struct bset *); |
32 | void bch_data_verify(struct cached_dev *, struct bio *); | 11 | void bch_data_verify(struct cached_dev *, struct bio *); |
12 | int __bch_count_data(struct btree *); | ||
13 | void __bch_check_keys(struct btree *, const char *, ...); | ||
14 | void bch_btree_iter_next_check(struct btree_iter *); | ||
15 | |||
16 | #define EBUG_ON(cond) BUG_ON(cond) | ||
17 | #define expensive_debug_checks(c) ((c)->expensive_debug_checks) | ||
18 | #define key_merging_disabled(c) ((c)->key_merging_disabled) | ||
33 | 19 | ||
34 | #else /* DEBUG */ | 20 | #else /* DEBUG */ |
35 | 21 | ||
36 | static inline void bch_btree_verify(struct btree *b, struct bset *i) {} | 22 | static inline void bch_btree_verify(struct btree *b, struct bset *i) {} |
37 | static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}; | 23 | static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} |
24 | static inline int __bch_count_data(struct btree *b) { return -1; } | ||
25 | static inline void __bch_check_keys(struct btree *b, const char *fmt, ...) {} | ||
26 | static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} | ||
27 | |||
28 | #define EBUG_ON(cond) do { if (cond); } while (0) | ||
29 | #define expensive_debug_checks(c) 0 | ||
30 | #define key_merging_disabled(c) 0 | ||
38 | 31 | ||
39 | #endif | 32 | #endif |
40 | 33 | ||
34 | #define bch_count_data(b) \ | ||
35 | (expensive_debug_checks((b)->c) ? __bch_count_data(b) : -1) | ||
36 | |||
37 | #define bch_check_keys(b, ...) \ | ||
38 | do { \ | ||
39 | if (expensive_debug_checks((b)->c)) \ | ||
40 | __bch_check_keys(b, __VA_ARGS__); \ | ||
41 | } while (0) | ||
42 | |||
41 | #ifdef CONFIG_DEBUG_FS | 43 | #ifdef CONFIG_DEBUG_FS |
42 | void bch_debug_init_cache_set(struct cache_set *); | 44 | void bch_debug_init_cache_set(struct cache_set *); |
43 | #else | 45 | #else |
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index ab286b9b5e40..9687771ec6f3 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c | |||
@@ -102,6 +102,7 @@ rw_attribute(io_error_halflife); | |||
102 | rw_attribute(verify); | 102 | rw_attribute(verify); |
103 | rw_attribute(key_merging_disabled); | 103 | rw_attribute(key_merging_disabled); |
104 | rw_attribute(gc_always_rewrite); | 104 | rw_attribute(gc_always_rewrite); |
105 | rw_attribute(expensive_debug_checks); | ||
105 | rw_attribute(freelist_percent); | 106 | rw_attribute(freelist_percent); |
106 | rw_attribute(cache_replacement_policy); | 107 | rw_attribute(cache_replacement_policy); |
107 | rw_attribute(btree_shrinker_disabled); | 108 | rw_attribute(btree_shrinker_disabled); |
@@ -517,6 +518,8 @@ lock_root: | |||
517 | sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); | 518 | sysfs_print(active_journal_entries, fifo_used(&c->journal.pin)); |
518 | sysfs_printf(verify, "%i", c->verify); | 519 | sysfs_printf(verify, "%i", c->verify); |
519 | sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); | 520 | sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled); |
521 | sysfs_printf(expensive_debug_checks, | ||
522 | "%i", c->expensive_debug_checks); | ||
520 | sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); | 523 | sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); |
521 | sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); | 524 | sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); |
522 | sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); | 525 | sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); |
@@ -599,6 +602,7 @@ STORE(__bch_cache_set) | |||
599 | sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); | 602 | sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); |
600 | sysfs_strtoul(verify, c->verify); | 603 | sysfs_strtoul(verify, c->verify); |
601 | sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); | 604 | sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); |
605 | sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks); | ||
602 | sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); | 606 | sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite); |
603 | sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); | 607 | sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled); |
604 | sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); | 608 | sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled); |
@@ -674,6 +678,7 @@ static struct attribute *bch_cache_set_internal_files[] = { | |||
674 | #ifdef CONFIG_BCACHE_DEBUG | 678 | #ifdef CONFIG_BCACHE_DEBUG |
675 | &sysfs_verify, | 679 | &sysfs_verify, |
676 | &sysfs_key_merging_disabled, | 680 | &sysfs_key_merging_disabled, |
681 | &sysfs_expensive_debug_checks, | ||
677 | #endif | 682 | #endif |
678 | &sysfs_gc_always_rewrite, | 683 | &sysfs_gc_always_rewrite, |
679 | &sysfs_btree_shrinker_disabled, | 684 | &sysfs_btree_shrinker_disabled, |
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 38ae7a4ce928..8ce5aab55962 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h | |||
@@ -15,12 +15,12 @@ | |||
15 | 15 | ||
16 | struct closure; | 16 | struct closure; |
17 | 17 | ||
18 | #ifdef CONFIG_BCACHE_EDEBUG | 18 | #ifdef CONFIG_BCACHE_DEBUG |
19 | 19 | ||
20 | #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) | 20 | #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) |
21 | #define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i) | 21 | #define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i) |
22 | 22 | ||
23 | #else /* EDEBUG */ | 23 | #else /* DEBUG */ |
24 | 24 | ||
25 | #define atomic_dec_bug(v) atomic_dec(v) | 25 | #define atomic_dec_bug(v) atomic_dec(v) |
26 | #define atomic_inc_bug(v, i) atomic_inc(v) | 26 | #define atomic_inc_bug(v, i) atomic_inc(v) |