author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-09-15 08:03:51 -0400
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-09-15 10:05:06 -0400
commit    77a7ae580c6cc8a0f0d5d7a7d61eb7e9fe8d99dc (patch)
tree      299adfb136ca146d9bbfd7e171351ab971c1b93d /fs
parent    d6d140097beb554daa967d3fb576e94ad2f82dcd (diff)
UBIFS: improve journal head debugging prints
Convert the journal head integer into the head name when printing debugging
information.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
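The effect of the patch is that every "jhead %d" debugging print becomes
"jhead %s" fed by the new dbg_jhead() helper, so the output reads
"jhead 1 (base)" instead of a bare number. Below is a minimal, stand-alone
user-space sketch of that mapping; it uses printf() rather than the kernel's
dbg_io()/dbg_jnl() macros, and the GCHD/BASEHD/DATAHD enum values are
illustrative assumptions here, not copied from the UBIFS headers:

/* Sketch only: user-space illustration of mapping a journal head number
 * to a human-readable name, as dbg_jhead() does in this patch.
 * The enum values below are assumptions for the example. */
#include <stdio.h>

enum { GCHD = 0, BASEHD = 1, DATAHD = 2 };

static const char *dbg_jhead(int jhead)
{
        switch (jhead) {
        case GCHD:
                return "0 (GC)";
        case BASEHD:
                return "1 (base)";
        case DATAHD:
                return "2 (data)";
        default:
                return "unknown journal head";
        }
}

int main(void)
{
        /* Old style: prints only the integer. */
        printf("jhead %d\n", BASEHD);            /* "jhead 1"        */
        /* New style: the name makes the output self-describing. */
        printf("jhead %s\n", dbg_jhead(BASEHD)); /* "jhead 1 (base)" */
        return 0;
}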
Diffstat (limited to 'fs')
-rw-r--r--  fs/ubifs/debug.c    19
-rw-r--r--  fs/ubifs/debug.h     2
-rw-r--r--  fs/ubifs/io.c       29
-rw-r--r--  fs/ubifs/journal.c  13
-rw-r--r--  fs/ubifs/log.c      15
5 files changed, 50 insertions, 28 deletions
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 8183ee0d4f9d..96900049bcd6 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -210,6 +210,20 @@ const char *dbg_cstate(int cmt_state)
         }
 }
 
+const char *dbg_jhead(int jhead)
+{
+        switch (jhead) {
+        case GCHD:
+                return "0 (GC)";
+        case BASEHD:
+                return "1 (base)";
+        case DATAHD:
+                return "2 (data)";
+        default:
+                return "unknown journal head";
+        }
+}
+
 static void dump_ch(const struct ubifs_ch *ch)
 {
         printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
@@ -623,8 +637,9 @@ void dbg_dump_budg(struct ubifs_info *c)
         /* If we are in R/O mode, journal heads do not exist */
         if (c->jheads)
                 for (i = 0; i < c->jhead_cnt; i++)
-                        printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
-                               c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
+                        printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
+                               dbg_jhead(c->jheads[i].wbuf.jhead),
+                               c->jheads[i].wbuf.lnum);
         for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
                 bud = rb_entry(rb, struct ubifs_bud, rb);
                 printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index fe2c9274c6a9..29d960101ea6 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -271,6 +271,7 @@ void ubifs_debugging_exit(struct ubifs_info *c);
 /* Dump functions */
 const char *dbg_ntype(int type);
 const char *dbg_cstate(int cmt_state);
+const char *dbg_jhead(int jhead);
 const char *dbg_get_key_dump(const struct ubifs_info *c,
                              const union ubifs_key *key);
 void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode);
@@ -427,6 +428,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 
 #define dbg_ntype(type) ""
 #define dbg_cstate(cmt_state) ""
+#define dbg_jhead(jhead) ""
 #define dbg_get_key_dump(c, key) ({})
 #define dbg_dump_inode(c, inode) ({})
 #define dbg_dump_node(c, node) ({})
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 762a7d6cec73..e589fedaf1ef 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -297,7 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
 {
         struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
 
-        dbg_io("jhead %d", wbuf->jhead);
+        dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
         wbuf->need_sync = 1;
         wbuf->c->need_wbuf_sync = 1;
         ubifs_wake_up_bgt(wbuf->c);
@@ -314,7 +314,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
 
         if (wbuf->no_timer)
                 return;
-        dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
+        dbg_io("set timer for jhead %s, %llu-%llu millisecs",
+               dbg_jhead(wbuf->jhead),
                div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
                div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
                        USEC_PER_SEC));
@@ -351,8 +352,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
                 /* Write-buffer is empty or not seeked */
                 return 0;
 
-        dbg_io("LEB %d:%d, %d bytes, jhead %d",
-               wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
+        dbg_io("LEB %d:%d, %d bytes, jhead %s",
+               wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
         ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
         ubifs_assert(!(wbuf->avail & 7));
         ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
@@ -401,7 +402,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
 {
         const struct ubifs_info *c = wbuf->c;
 
-        dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
+        dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
         ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
         ubifs_assert(offs >= 0 && offs <= c->leb_size);
         ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
@@ -508,9 +509,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         struct ubifs_info *c = wbuf->c;
         int err, written, n, aligned_len = ALIGN(len, 8), offs;
 
-        dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
-               dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
-               wbuf->lnum, wbuf->offs + wbuf->used);
+        dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
+               dbg_ntype(((struct ubifs_ch *)buf)->node_type),
+               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
         ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
         ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
         ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
@@ -535,8 +536,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
                 memcpy(wbuf->buf + wbuf->used, buf, len);
 
                 if (aligned_len == wbuf->avail) {
-                        dbg_io("flush jhead %d wbuf to LEB %d:%d",
-                               wbuf->jhead, wbuf->lnum, wbuf->offs);
+                        dbg_io("flush jhead %s wbuf to LEB %d:%d",
+                               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
                         err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
                                             wbuf->offs, c->min_io_size,
                                             wbuf->dtype);
@@ -564,8 +565,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
          * minimal I/O unit. We have to fill and flush write-buffer and switch
          * to the next min. I/O unit.
          */
-        dbg_io("flush jhead %d wbuf to LEB %d:%d",
-               wbuf->jhead, wbuf->lnum, wbuf->offs);
+        dbg_io("flush jhead %s wbuf to LEB %d:%d",
+               dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
         memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
         err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
                             c->min_io_size, wbuf->dtype);
@@ -698,8 +699,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
         int err, rlen, overlap;
         struct ubifs_ch *ch = buf;
 
-        dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
-               dbg_ntype(type), len, wbuf->jhead);
+        dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
+               dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
         ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
         ubifs_assert(!(offs & 7) && offs < c->leb_size);
         ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 64b5f3a309f5..d321baeca68d 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -158,7 +158,7 @@ again:
          * some. But the write-buffer mutex has to be unlocked because
          * GC also takes it.
          */
-        dbg_jnl("no free space jhead %d, run GC", jhead);
+        dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
         mutex_unlock(&wbuf->io_mutex);
 
         lnum = ubifs_garbage_collect(c, 0);
@@ -173,7 +173,8 @@ again:
                  * because we dropped @wbuf->io_mutex, so try once
                  * again.
                  */
-                dbg_jnl("GC couldn't make a free LEB for jhead %d", jhead);
+                dbg_jnl("GC couldn't make a free LEB for jhead %s",
+                        dbg_jhead(jhead));
                 if (retries++ < 2) {
                         dbg_jnl("retry (%d)", retries);
                         goto again;
@@ -184,7 +185,7 @@ again:
         }
 
         mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
-        dbg_jnl("got LEB %d for jhead %d", lnum, jhead);
+        dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
         avail = c->leb_size - wbuf->offs - wbuf->used;
 
         if (wbuf->lnum != -1 && avail >= len) {
@@ -255,7 +256,8 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
         *lnum = c->jheads[jhead].wbuf.lnum;
         *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
 
-        dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
+        dbg_jnl("jhead %s, LEB %d:%d, len %d",
+                dbg_jhead(jhead), *lnum, *offs, len);
         ubifs_prepare_node(c, node, len, 0);
 
         return ubifs_wbuf_write_nolock(wbuf, node, len);
@@ -285,7 +287,8 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
 
         *lnum = c->jheads[jhead].wbuf.lnum;
         *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
-        dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
+        dbg_jnl("jhead %s, LEB %d:%d, len %d",
+                dbg_jhead(jhead), *lnum, *offs, len);
 
         err = ubifs_wbuf_write_nolock(wbuf, buf, len);
         if (err)
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index 60dcf6cbd61b..c345e125f42c 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -169,8 +169,8 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
          */
         c->bud_bytes += c->leb_size - bud->start;
 
-        dbg_log("LEB %d:%d, jhead %d, bud_bytes %lld", bud->lnum,
-                bud->start, bud->jhead, c->bud_bytes);
+        dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
+                bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
         spin_unlock(&c->buds_lock);
 }
 
@@ -355,16 +355,16 @@ static void remove_buds(struct ubifs_info *c)
                          * heads (non-closed buds).
                          */
                         c->cmt_bud_bytes += wbuf->offs - bud->start;
-                        dbg_log("preserve %d:%d, jhead %d, bud bytes %d, "
+                        dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
                                 "cmt_bud_bytes %lld", bud->lnum, bud->start,
-                                bud->jhead, wbuf->offs - bud->start,
+                                dbg_jhead(bud->jhead), wbuf->offs - bud->start,
                                 c->cmt_bud_bytes);
                         bud->start = wbuf->offs;
                 } else {
                         c->cmt_bud_bytes += c->leb_size - bud->start;
-                        dbg_log("remove %d:%d, jhead %d, bud bytes %d, "
+                        dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
                                 "cmt_bud_bytes %lld", bud->lnum, bud->start,
-                                bud->jhead, c->leb_size - bud->start,
+                                dbg_jhead(bud->jhead), c->leb_size - bud->start,
                                 c->cmt_bud_bytes);
                         rb_erase(p1, &c->buds);
                         /*
@@ -429,7 +429,8 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
                 if (lnum == -1 || offs == c->leb_size)
                         continue;
 
-                dbg_log("add ref to LEB %d:%d for jhead %d", lnum, offs, i);
+                dbg_log("add ref to LEB %d:%d for jhead %s",
+                        lnum, offs, dbg_jhead(i));
                 ref = buf + len;
                 ref->ch.node_type = UBIFS_REF_NODE;
                 ref->lnum = cpu_to_le32(lnum);