Diffstat (limited to 'drivers/md/bcache/debug.c')
-rw-r--r--  drivers/md/bcache/debug.c | 185
1 file changed, 72 insertions(+), 113 deletions(-)
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 88e6411eab4f..264fcfbd6290 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -8,7 +8,6 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
-#include "request.h"
 
 #include <linux/console.h>
 #include <linux/debugfs.h>
@@ -77,29 +76,17 @@ int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
         return out - buf;
 }
 
-int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
-{
-        return scnprintf(buf, size, "%zu level %i/%i",
-                         PTR_BUCKET_NR(b->c, &b->key, 0),
-                         b->level, b->c->root ? b->c->root->level : -1);
-}
-
-#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
-
-static bool skipped_backwards(struct btree *b, struct bkey *k)
-{
-        return bkey_cmp(k, (!b->level)
-                        ? &START_KEY(bkey_next(k))
-                        : bkey_next(k)) > 0;
-}
+#ifdef CONFIG_BCACHE_DEBUG
 
 static void dump_bset(struct btree *b, struct bset *i)
 {
-        struct bkey *k;
+        struct bkey *k, *next;
         unsigned j;
         char buf[80];
 
-        for (k = i->start; k < end(i); k = bkey_next(k)) {
+        for (k = i->start; k < end(i); k = next) {
+                next = bkey_next(k);
+
                 bch_bkey_to_text(buf, sizeof(buf), k);
                 printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
                        (uint64_t *) k - i->d, i->keys, buf);
@@ -115,15 +102,21 @@ static void dump_bset(struct btree *b, struct bset *i)
 
                 printk(" %s\n", bch_ptr_status(b->c, k));
 
-                if (bkey_next(k) < end(i) &&
-                    skipped_backwards(b, k))
+                if (next < end(i) &&
+                    bkey_cmp(k, !b->level ? &START_KEY(next) : next) > 0)
                         printk(KERN_ERR "Key skipped backwards\n");
         }
 }
 
-#endif
+static void bch_dump_bucket(struct btree *b)
+{
+        unsigned i;
 
-#ifdef CONFIG_BCACHE_DEBUG
+        console_lock();
+        for (i = 0; i <= b->nsets; i++)
+                dump_bset(b, b->sets[i].data);
+        console_unlock();
+}
 
 void bch_btree_verify(struct btree *b, struct bset *new)
 {
@@ -176,66 +169,44 @@ void bch_btree_verify(struct btree *b, struct bset *new)
         mutex_unlock(&b->c->verify_lock);
 }
 
-static void data_verify_endio(struct bio *bio, int error)
-{
-        struct closure *cl = bio->bi_private;
-        closure_put(cl);
-}
-
-void bch_data_verify(struct search *s)
+void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
         char name[BDEVNAME_SIZE];
-        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-        struct closure *cl = &s->cl;
         struct bio *check;
         struct bio_vec *bv;
         int i;
 
-        if (!s->unaligned_bvec)
-                bio_for_each_segment(bv, s->orig_bio, i)
-                        bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-
-        check = bio_clone(s->orig_bio, GFP_NOIO);
+        check = bio_clone(bio, GFP_NOIO);
         if (!check)
                 return;
 
         if (bio_alloc_pages(check, GFP_NOIO))
                 goto out_put;
 
-        check->bi_rw = READ_SYNC;
-        check->bi_private = cl;
-        check->bi_end_io = data_verify_endio;
-
-        closure_bio_submit(check, cl, &dc->disk);
-        closure_sync(cl);
+        submit_bio_wait(READ_SYNC, check);
 
-        bio_for_each_segment(bv, s->orig_bio, i) {
-                void *p1 = kmap(bv->bv_page);
-                void *p2 = kmap(check->bi_io_vec[i].bv_page);
+        bio_for_each_segment(bv, bio, i) {
+                void *p1 = kmap_atomic(bv->bv_page);
+                void *p2 = page_address(check->bi_io_vec[i].bv_page);
 
-                if (memcmp(p1 + bv->bv_offset,
-                           p2 + bv->bv_offset,
-                           bv->bv_len))
-                        printk(KERN_ERR
-                               "bcache (%s): verify failed at sector %llu\n",
-                               bdevname(dc->bdev, name),
-                               (uint64_t) s->orig_bio->bi_sector);
+                cache_set_err_on(memcmp(p1 + bv->bv_offset,
+                                        p2 + bv->bv_offset,
+                                        bv->bv_len),
+                                 dc->disk.c,
+                                 "verify failed at dev %s sector %llu",
+                                 bdevname(dc->bdev, name),
+                                 (uint64_t) bio->bi_sector);
 
-                kunmap(bv->bv_page);
-                kunmap(check->bi_io_vec[i].bv_page);
+                kunmap_atomic(p1);
         }
 
-        __bio_for_each_segment(bv, check, i, 0)
+        bio_for_each_segment_all(bv, check, i)
                 __free_page(bv->bv_page);
 out_put:
         bio_put(check);
 }
 
-#endif
-
-#ifdef CONFIG_BCACHE_EDEBUG
-
-unsigned bch_count_data(struct btree *b)
+int __bch_count_data(struct btree *b)
 {
         unsigned ret = 0;
         struct btree_iter iter;
@@ -247,72 +218,60 @@ unsigned bch_count_data(struct btree *b)
         return ret;
 }
 
-static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
-                                   va_list args)
-{
-        unsigned i;
-        char buf[80];
-
-        console_lock();
-
-        for (i = 0; i <= b->nsets; i++)
-                dump_bset(b, b->sets[i].data);
-
-        vprintk(fmt, args);
-
-        console_unlock();
-
-        bch_btree_to_text(buf, sizeof(buf), b);
-        panic("at %s\n", buf);
-}
-
-void bch_check_key_order_msg(struct btree *b, struct bset *i,
-                             const char *fmt, ...)
-{
-        struct bkey *k;
-
-        if (!i->keys)
-                return;
-
-        for (k = i->start; bkey_next(k) < end(i); k = bkey_next(k))
-                if (skipped_backwards(b, k)) {
-                        va_list args;
-                        va_start(args, fmt);
-
-                        vdump_bucket_and_panic(b, fmt, args);
-                        va_end(args);
-                }
-}
-
-void bch_check_keys(struct btree *b, const char *fmt, ...)
+void __bch_check_keys(struct btree *b, const char *fmt, ...)
 {
         va_list args;
         struct bkey *k, *p = NULL;
         struct btree_iter iter;
-
-        if (b->level)
-                return;
+        const char *err;
 
         for_each_key(b, k, &iter) {
-                if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) {
-                        printk(KERN_ERR "Keys out of order:\n");
-                        goto bug;
-                }
+                if (!b->level) {
+                        err = "Keys out of order";
+                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
+                                goto bug;
 
-                if (bch_ptr_invalid(b, k))
-                        continue;
+                        if (bch_ptr_invalid(b, k))
+                                continue;
 
-                if (p && bkey_cmp(p, &START_KEY(k)) > 0) {
-                        printk(KERN_ERR "Overlapping keys:\n");
-                        goto bug;
+                        err = "Overlapping keys";
+                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
+                                goto bug;
+                } else {
+                        if (bch_ptr_bad(b, k))
+                                continue;
+
+                        err = "Duplicate keys";
+                        if (p && !bkey_cmp(p, k))
+                                goto bug;
                 }
                 p = k;
         }
+
+        err = "Key larger than btree node key";
+        if (p && bkey_cmp(p, &b->key) > 0)
+                goto bug;
+
         return;
 bug:
+        bch_dump_bucket(b);
+
         va_start(args, fmt);
-        vdump_bucket_and_panic(b, fmt, args);
+        vprintk(fmt, args);
         va_end(args);
+
+        panic("bcache error: %s:\n", err);
+}
+
+void bch_btree_iter_next_check(struct btree_iter *iter)
+{
+        struct bkey *k = iter->data->k, *next = bkey_next(k);
+
+        if (next < iter->data->end &&
+            bkey_cmp(k, iter->b->level ? next : &START_KEY(next)) > 0) {
+                bch_dump_bucket(iter->b);
+                panic("Key skipped backwards\n");
+        }
 }
 
 #endif
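
Note on the bch_data_verify() rewrite above: the hand-rolled data_verify_endio()/closure machinery is replaced by a synchronous re-read (submit_bio_wait()) followed by a page-by-page memcmp() against the original bio, with mismatches now reported through cache_set_err_on() rather than a bare printk(). The userspace sketch below mirrors just that compare-and-report loop under simplifying assumptions: verify_sectors() is a hypothetical name and the flat buffers stand in for bio segments — the real code walks struct bio with kmap_atomic().

/*
 * Minimal userspace sketch of the verify loop, assuming flat buffers
 * in place of bio segments. verify_sectors() is a hypothetical name,
 * not kernel API.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SECTOR_SIZE 512

/* Return the first mismatching sector index, or -1 if the buffers match. */
static long verify_sectors(const uint8_t *orig, const uint8_t *check,
                           size_t len)
{
        size_t off;

        for (off = 0; off + SECTOR_SIZE <= len; off += SECTOR_SIZE)
                if (memcmp(orig + off, check + off, SECTOR_SIZE))
                        return (long)(off / SECTOR_SIZE);
        return -1;
}

int main(void)
{
        uint8_t cached[2048] = {0};     /* data served from the cache */
        uint8_t reread[2048] = {0};     /* data re-read from the backing device */
        long bad;

        reread[1030] = 0xff;            /* corrupt one byte in sector 2 */

        bad = verify_sectors(cached, reread, sizeof(cached));
        if (bad >= 0)
                fprintf(stderr, "verify failed at sector %ld\n", bad);
        return 0;
}

As in the kernel version, the comparison stops reporting at the first bad sector; the kernel code additionally routes the failure into the cache set's error handling so repeated verify failures can shut the cache down.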