diff options
author | Kent Overstreet <kmo@daterainc.com> | 2013-10-24 19:36:03 -0400 |
---|---|---|
committer | Kent Overstreet <kmo@daterainc.com> | 2013-11-11 00:56:34 -0500 |
commit | 280481d06c8a683d9aaa26125476222e76b733c5 (patch) | |
tree | 513b7387da60b3d497a108335f743369106eb7a3 /drivers/md/bcache/debug.c | |
parent | e58ff155034791ed3a5563d24a50fae0a8c1617c (diff) |
bcache: Debug code improvements
A couple of changes:
* Consolidate bch_check_keys() and bch_check_key_order(), and move the
checks that only check_key_order() could do to bch_btree_iter_next().
* Get rid of CONFIG_BCACHE_EDEBUG - now, all that code is compiled in
when CONFIG_BCACHE_DEBUG is enabled, and there's now a sysfs file to
flip on the EDEBUG checks at runtime.
* Dropped an old, not terribly useful check in rw_unlock(), and
refactored/improved some of the other debug code.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/debug.c')
-rw-r--r-- | drivers/md/bcache/debug.c | 136 |
1 file changed, 57 insertions, 79 deletions
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index d9ccb3169aa2..e99e6b8852b2 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c | |||
@@ -76,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k) | |||
76 | return out - buf; | 76 | return out - buf; |
77 | } | 77 | } |
78 | 78 | ||
79 | int bch_btree_to_text(char *buf, size_t size, const struct btree *b) | 79 | #ifdef CONFIG_BCACHE_DEBUG |
80 | { | ||
81 | return scnprintf(buf, size, "%zu level %i/%i", | ||
82 | PTR_BUCKET_NR(b->c, &b->key, 0), | ||
83 | b->level, b->c->root ? b->c->root->level : -1); | ||
84 | } | ||
85 | |||
86 | #if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG) | ||
87 | |||
88 | static bool skipped_backwards(struct btree *b, struct bkey *k) | ||
89 | { | ||
90 | return bkey_cmp(k, (!b->level) | ||
91 | ? &START_KEY(bkey_next(k)) | ||
92 | : bkey_next(k)) > 0; | ||
93 | } | ||
94 | 80 | ||
95 | static void dump_bset(struct btree *b, struct bset *i) | 81 | static void dump_bset(struct btree *b, struct bset *i) |
96 | { | 82 | { |
97 | struct bkey *k; | 83 | struct bkey *k, *next; |
98 | unsigned j; | 84 | unsigned j; |
99 | char buf[80]; | 85 | char buf[80]; |
100 | 86 | ||
101 | for (k = i->start; k < end(i); k = bkey_next(k)) { | 87 | for (k = i->start; k < end(i); k = next) { |
88 | next = bkey_next(k); | ||
89 | |||
102 | bch_bkey_to_text(buf, sizeof(buf), k); | 90 | bch_bkey_to_text(buf, sizeof(buf), k); |
103 | printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), | 91 | printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b), |
104 | (uint64_t *) k - i->d, i->keys, buf); | 92 | (uint64_t *) k - i->d, i->keys, buf); |
@@ -114,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i) | |||
114 | 102 | ||
115 | printk(" %s\n", bch_ptr_status(b->c, k)); | 103 | printk(" %s\n", bch_ptr_status(b->c, k)); |
116 | 104 | ||
117 | if (bkey_next(k) < end(i) && | 105 | if (next < end(i) && |
118 | skipped_backwards(b, k)) | 106 | bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0) |
119 | printk(KERN_ERR "Key skipped backwards\n"); | 107 | printk(KERN_ERR "Key skipped backwards\n"); |
120 | } | 108 | } |
121 | } | 109 | } |
122 | 110 | ||
123 | #endif | 111 | static void bch_dump_bucket(struct btree *b) |
112 | { | ||
113 | unsigned i; | ||
124 | 114 | ||
125 | #ifdef CONFIG_BCACHE_DEBUG | 115 | console_lock(); |
116 | for (i = 0; i <= b->nsets; i++) | ||
117 | dump_bset(b, b->sets[i].data); | ||
118 | console_unlock(); | ||
119 | } | ||
126 | 120 | ||
127 | void bch_btree_verify(struct btree *b, struct bset *new) | 121 | void bch_btree_verify(struct btree *b, struct bset *new) |
128 | { | 122 | { |
@@ -211,11 +205,7 @@ out_put: | |||
211 | bio_put(check); | 205 | bio_put(check); |
212 | } | 206 | } |
213 | 207 | ||
214 | #endif | 208 | int __bch_count_data(struct btree *b) |
215 | |||
216 | #ifdef CONFIG_BCACHE_EDEBUG | ||
217 | |||
218 | unsigned bch_count_data(struct btree *b) | ||
219 | { | 209 | { |
220 | unsigned ret = 0; | 210 | unsigned ret = 0; |
221 | struct btree_iter iter; | 211 | struct btree_iter iter; |
@@ -227,72 +217,60 @@ unsigned bch_count_data(struct btree *b) | |||
227 | return ret; | 217 | return ret; |
228 | } | 218 | } |
229 | 219 | ||
230 | static void vdump_bucket_and_panic(struct btree *b, const char *fmt, | 220 | void __bch_check_keys(struct btree *b, const char *fmt, ...) |
231 | va_list args) | ||
232 | { | ||
233 | unsigned i; | ||
234 | char buf[80]; | ||
235 | |||
236 | console_lock(); | ||
237 | |||
238 | for (i = 0; i <= b->nsets; i++) | ||
239 | dump_bset(b, b->sets[i].data); | ||
240 | |||
241 | vprintk(fmt, args); | ||
242 | |||
243 | console_unlock(); | ||
244 | |||
245 | bch_btree_to_text(buf, sizeof(buf), b); | ||
246 | panic("at %s\n", buf); | ||
247 | } | ||
248 | |||
249 | void bch_check_key_order_msg(struct btree *b, struct bset *i, | ||
250 | const char *fmt, ...) | ||
251 | { | ||
252 | struct bkey *k; | ||
253 | |||
254 | if (!i->keys) | ||
255 | return; | ||
256 | |||
257 | for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k)) | ||
258 | if (skipped_backwards(b, k)) { | ||
259 | va_list args; | ||
260 | va_start(args, fmt); | ||
261 | |||
262 | vdump_bucket_and_panic(b, fmt, args); | ||
263 | va_end(args); | ||
264 | } | ||
265 | } | ||
266 | |||
267 | void bch_check_keys(struct btree *b, const char *fmt, ...) | ||
268 | { | 221 | { |
269 | va_list args; | 222 | va_list args; |
270 | struct bkey *k, *p = NULL; | 223 | struct bkey *k, *p = NULL; |
271 | struct btree_iter iter; | 224 | struct btree_iter iter; |
272 | 225 | const char *err; | |
273 | if (b->level) | ||
274 | return; | ||
275 | 226 | ||
276 | for_each_key(b, k, &iter) { | 227 | for_each_key(b, k, &iter) { |
277 | if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) { | 228 | if (!b->level) { |
278 | printk(KERN_ERR "Keys out of order:\n"); | 229 | err = "Keys out of order"; |
279 | goto bug; | 230 | if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) |
280 | } | 231 | goto bug; |
281 | 232 | ||
282 | if (bch_ptr_invalid(b, k)) | 233 | if (bch_ptr_invalid(b, k)) |
283 | continue; | 234 | continue; |
284 | 235 | ||
285 | if (p && bkey_cmp(p, &START_KEY(k)) > 0) { | 236 | err = "Overlapping keys"; |
286 | printk(KERN_ERR "Overlapping keys:\n"); | 237 | if (p && bkey_cmp(p, &START_KEY(k)) > 0) |
287 | goto bug; | 238 | goto bug; |
239 | } else { | ||
240 | if (bch_ptr_bad(b, k)) | ||
241 | continue; | ||
242 | |||
243 | err = "Duplicate keys"; | ||
244 | if (p && !bkey_cmp(p, k)) | ||
245 | goto bug; | ||
288 | } | 246 | } |
289 | p = k; | 247 | p = k; |
290 | } | 248 | } |
249 | |||
250 | err = "Key larger than btree node key"; | ||
251 | if (p && bkey_cmp(p, &b->key) > 0) | ||
252 | goto bug; | ||
253 | |||
291 | return; | 254 | return; |
292 | bug: | 255 | bug: |
256 | bch_dump_bucket(b); | ||
257 | |||
293 | va_start(args, fmt); | 258 | va_start(args, fmt); |
294 | vdump_bucket_and_panic(b, fmt, args); | 259 | vprintk(fmt, args); |
295 | va_end(args); | 260 | va_end(args); |
261 | |||
262 | panic("bcache error: %s:\n", err); | ||
263 | } | ||
264 | |||
265 | void bch_btree_iter_next_check(struct btree_iter *iter) | ||
266 | { | ||
267 | struct bkey *k = iter->data->k, *next = bkey_next(k); | ||
268 | |||
269 | if (next < iter->data->end && | ||
270 | bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) { | ||
271 | bch_dump_bucket(iter->b); | ||
272 | panic("Key skipped backwards\n"); | ||
273 | } | ||
296 | } | 274 | } |
297 | 275 | ||
298 | #endif | 276 | #endif |