author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-06-18 06:37:15 -0400
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-07-05 11:45:16 -0400
commit     70aee2f153972f70fad5f7025134fec063f9efbe (patch)
tree       d6a00fe0a8c900935c360abf7a622078f8d22afa /fs/ubifs
parent     e3dc5a665d39112e98cfd5bbc7fda2963c00c12c (diff)
UBIFS: improve debugging messages

1. Make the I/O debugging messages print the journal head number.
2. Add debugging prints to the write-buffer timer functions.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
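Both changes follow the same pattern: every I/O debugging message now carries the journal head number (wbuf->jhead) so that output coming from different write-buffers can be told apart. The following is a rough, self-contained C sketch of that pattern only; it is not the kernel code, and the struct, the dbg_io macro, the callback, and the jhead value below are simplified stand-ins:

    #include <stdio.h>

    /* Simplified stand-in for the UBIFS write-buffer. */
    struct wbuf {
            int jhead;       /* journal head this write-buffer belongs to */
            int need_sync;   /* set when the timer asks for a sync */
    };

    /* Stand-in for dbg_io(): tag every I/O debugging message. */
    #define dbg_io(fmt, ...) fprintf(stderr, "io: " fmt "\n", __VA_ARGS__)

    /* Analogue of wbuf_timer_callback_nolock(): report which journal
     * head's timer expired before flagging the write-buffer for sync. */
    static void timer_callback(struct wbuf *wbuf)
    {
            dbg_io("jhead %d", wbuf->jhead);
            wbuf->need_sync = 1;
    }

    int main(void)
    {
            struct wbuf w = { .jhead = 2, .need_sync = 0 };

            timer_callback(&w);   /* prints "io: jhead 2" */
            return 0;
    }

With the head number in the message, a log line such as "io: jhead 2" immediately identifies which journal head's write-buffer triggered the sync.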
Diffstat (limited to 'fs/ubifs')
-rw-r--r--  fs/ubifs/io.c  26
1 file changed, 16 insertions, 10 deletions
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 2d41ae1d6607..2ef689a9a363 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -297,6 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
 {
         struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
 
+        dbg_io("jhead %d", wbuf->jhead);
         wbuf->need_sync = 1;
         wbuf->c->need_wbuf_sync = 1;
         ubifs_wake_up_bgt(wbuf->c);
@@ -313,6 +314,9 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
 
         if (!ktime_to_ns(wbuf->softlimit))
                 return;
+        dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
+               ktime_to_ns(wbuf->softlimit)/USEC_PER_SEC,
+               (ktime_to_ns(wbuf->softlimit) + wbuf->delta)/USEC_PER_SEC);
         hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
                                HRTIMER_MODE_REL);
 }
@@ -349,8 +353,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
                 /* Write-buffer is empty or not seeked */
                 return 0;
 
-        dbg_io("LEB %d:%d, %d bytes",
-               wbuf->lnum, wbuf->offs, wbuf->used);
+        dbg_io("LEB %d:%d, %d bytes, jhead %d",
+               wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
         ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
         ubifs_assert(!(wbuf->avail & 7));
         ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
@@ -399,7 +403,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
 {
         const struct ubifs_info *c = wbuf->c;
 
-        dbg_io("LEB %d:%d", lnum, offs);
+        dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
         ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
         ubifs_assert(offs >= 0 && offs <= c->leb_size);
         ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
@@ -506,9 +510,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         struct ubifs_info *c = wbuf->c;
         int err, written, n, aligned_len = ALIGN(len, 8), offs;
 
-        dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len,
-               dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum,
-               wbuf->offs + wbuf->used);
+        dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
+               dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
+               wbuf->lnum, wbuf->offs + wbuf->used);
         ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
         ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
         ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
@@ -533,8 +537,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
         memcpy(wbuf->buf + wbuf->used, buf, len);
 
         if (aligned_len == wbuf->avail) {
-                dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum,
-                       wbuf->offs);
+                dbg_io("flush jhead %d wbuf to LEB %d:%d",
+                       wbuf->jhead, wbuf->lnum, wbuf->offs);
                 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
                                     wbuf->offs, c->min_io_size,
                                     wbuf->dtype);
@@ -562,7 +566,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
          * minimal I/O unit. We have to fill and flush write-buffer and switch
          * to the next min. I/O unit.
          */
-        dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs);
+        dbg_io("flush jhead %d wbuf to LEB %d:%d",
+               wbuf->jhead, wbuf->lnum, wbuf->offs);
         memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
         err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
                             c->min_io_size, wbuf->dtype);
@@ -695,7 +700,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
         int err, rlen, overlap;
         struct ubifs_ch *ch = buf;
 
-        dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
+        dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
+               dbg_ntype(type), len, wbuf->jhead);
         ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
         ubifs_assert(!(offs & 7) && offs < c->leb_size);
         ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);