author:    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-09-15 08:03:51 -0400
committer: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-09-15 10:05:06 -0400
commit:    77a7ae580c6cc8a0f0d5d7a7d61eb7e9fe8d99dc (patch)
tree:      299adfb136ca146d9bbfd7e171351ab971c1b93d /fs/ubifs/journal.c
parent:    d6d140097beb554daa967d3fb576e94ad2f82dcd (diff)
UBIFS: improve journal head debugging prints
Convert the journal head integer into the head name when printing
debugging information.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
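The updated prints rely on a dbg_jhead() helper that maps a journal head
number to a human-readable name. That helper lives outside journal.c, so it
does not appear in the diff below. A minimal sketch of what it plausibly
looks like, assuming the usual UBIFS journal head constants (GCHD, BASEHD,
DATAHD) and their values from fs/ubifs/ubifs.h:

/*
 * Sketch of the dbg_jhead() helper used by the updated prints. The real
 * definition is not part of this diff; the head constants and their
 * values here are assumptions based on fs/ubifs/ubifs.h.
 */
#define GCHD   0        /* garbage-collection journal head */
#define BASEHD 1        /* base journal head */
#define DATAHD 2        /* data journal head */

const char *dbg_jhead(int jhead)
{
        switch (jhead) {
        case GCHD:
                return "GC head";
        case BASEHD:
                return "base head";
        case DATAHD:
                return "data head";
        default:
                return "unknown journal head";
        }
}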
Diffstat (limited to 'fs/ubifs/journal.c')
-rw-r--r--  fs/ubifs/journal.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 64b5f3a309f5..d321baeca68d 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -158,7 +158,7 @@ again:
         * some. But the write-buffer mutex has to be unlocked because
         * GC also takes it.
         */
-       dbg_jnl("no free space jhead %d, run GC", jhead);
+       dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
        mutex_unlock(&wbuf->io_mutex);

        lnum = ubifs_garbage_collect(c, 0);
@@ -173,7 +173,8 @@ again:
                 * because we dropped @wbuf->io_mutex, so try once
                 * again.
                 */
-               dbg_jnl("GC couldn't make a free LEB for jhead %d", jhead);
+               dbg_jnl("GC couldn't make a free LEB for jhead %s",
+                       dbg_jhead(jhead));
                if (retries++ < 2) {
                        dbg_jnl("retry (%d)", retries);
                        goto again;
@@ -184,7 +185,7 @@ again:
        }

        mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
-       dbg_jnl("got LEB %d for jhead %d", lnum, jhead);
+       dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
        avail = c->leb_size - wbuf->offs - wbuf->used;

        if (wbuf->lnum != -1 && avail >= len) {
@@ -255,7 +256,8 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
        *lnum = c->jheads[jhead].wbuf.lnum;
        *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;

-       dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
+       dbg_jnl("jhead %s, LEB %d:%d, len %d",
+               dbg_jhead(jhead), *lnum, *offs, len);
        ubifs_prepare_node(c, node, len, 0);

        return ubifs_wbuf_write_nolock(wbuf, node, len);
@@ -285,7 +287,8 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,

        *lnum = c->jheads[jhead].wbuf.lnum;
        *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
-       dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
+       dbg_jnl("jhead %s, LEB %d:%d, len %d",
+               dbg_jhead(jhead), *lnum, *offs, len);

        err = ubifs_wbuf_write_nolock(wbuf, buf, len);
        if (err)
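With the patch applied, a print such as the one in write_node() names the
journal head instead of showing a bare integer. A hypothetical before/after,
assuming the data head (index 2 in the sketch above) and made-up LEB, offset
and length values; the exact message prefix depends on how dbg_jnl is
configured:

        before: jhead 2, LEB 14:1024, len 160
        after:  jhead data head, LEB 14:1024, len 160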