author      Joe Perches <joe@perches.com>                   2012-02-15 18:56:43 -0500
committer   David Woodhouse <David.Woodhouse@intel.com>     2012-03-26 19:39:24 -0400
commit      9c261b33a9c417ccaf07f41796be278d09d02d49 (patch)
tree        6cf47f47364647dfbba845c0fd3f05539072175a /fs/jffs2
parent      bf011f2ed53d587fdd8148c173c4f09ed77bdf1a (diff)
jffs2: Convert most D1/D2 macros to jffs2_dbg
D1 and D2 macros are mostly used to emit debugging messages.
Convert the logging uses of D1 and D2 to jffs2_dbg(level, fmt, ...)
for a style a bit more consistent with the rest of the kernel.
All jffs2_dbg output is now emitted at KERN_DEBUG, whereas some of
the previous uses were emitted at various other KERN_<LEVEL>s.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
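
The conversion itself is mechanical. As an illustration of the pattern (this particular pair is taken from the background.c hunk below), a debug printk wrapped in D1() becomes a direct jffs2_dbg() call at verbosity level 1, with hard-coded function names replaced by __func__:

    /* Before: the message is compiled in only when D1() expands to its argument */
    D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));

    /* After: the same message through the level-checked helper, using __func__ */
    jffs2_dbg(1, "%s(): SIGHUP received\n", __func__);

D1-wrapped messages that previously went out at other levels (KERN_INFO, KERN_NOTICE, KERN_WARNING) are converted the same way, which is why all of this output now lands at KERN_DEBUG as noted above.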
Diffstat (limited to 'fs/jffs2')
-rw-r--r--  fs/jffs2/background.c |  23
-rw-r--r--  fs/jffs2/compr.c      |  14
-rw-r--r--  fs/jffs2/compr_zlib.c |  31
-rw-r--r--  fs/jffs2/debug.h      |   7
-rw-r--r--  fs/jffs2/dir.c        |  32
-rw-r--r--  fs/jffs2/erase.c      |  33
-rw-r--r--  fs/jffs2/file.c       |  31
-rw-r--r--  fs/jffs2/fs.c         |  43
-rw-r--r--  fs/jffs2/gc.c         | 162
-rw-r--r--  fs/jffs2/nodemgmt.c   | 161
-rw-r--r--  fs/jffs2/read.c       |  47
-rw-r--r--  fs/jffs2/scan.c       | 126
-rw-r--r--  fs/jffs2/super.c      |  10
-rw-r--r--  fs/jffs2/symlink.c    |   3
-rw-r--r--  fs/jffs2/wbuf.c       |  67
-rw-r--r--  fs/jffs2/write.c      |  74
16 files changed, 504 insertions, 360 deletions
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 404111b016c9..26ce06cd4d0f 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -47,7 +47,8 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) | |||
47 | ret = PTR_ERR(tsk); | 47 | ret = PTR_ERR(tsk); |
48 | } else { | 48 | } else { |
49 | /* Wait for it... */ | 49 | /* Wait for it... */ |
50 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid)); | 50 | jffs2_dbg(1, "JFFS2: Garbage collect thread is pid %d\n", |
51 | tsk->pid); | ||
51 | wait_for_completion(&c->gc_thread_start); | 52 | wait_for_completion(&c->gc_thread_start); |
52 | ret = tsk->pid; | 53 | ret = tsk->pid; |
53 | } | 54 | } |
@@ -60,7 +61,7 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) | |||
60 | int wait = 0; | 61 | int wait = 0; |
61 | spin_lock(&c->erase_completion_lock); | 62 | spin_lock(&c->erase_completion_lock); |
62 | if (c->gc_task) { | 63 | if (c->gc_task) { |
63 | D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid)); | 64 | jffs2_dbg(1, "jffs2: Killing GC task %d\n", c->gc_task->pid); |
64 | send_sig(SIGKILL, c->gc_task, 1); | 65 | send_sig(SIGKILL, c->gc_task, 1); |
65 | wait = 1; | 66 | wait = 1; |
66 | } | 67 | } |
@@ -90,7 +91,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
90 | if (!jffs2_thread_should_wake(c)) { | 91 | if (!jffs2_thread_should_wake(c)) { |
91 | set_current_state (TASK_INTERRUPTIBLE); | 92 | set_current_state (TASK_INTERRUPTIBLE); |
92 | spin_unlock(&c->erase_completion_lock); | 93 | spin_unlock(&c->erase_completion_lock); |
93 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); | 94 | jffs2_dbg(1, "%s(): sleeping...\n", __func__); |
94 | schedule(); | 95 | schedule(); |
95 | } else | 96 | } else |
96 | spin_unlock(&c->erase_completion_lock); | 97 | spin_unlock(&c->erase_completion_lock); |
@@ -109,7 +110,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
109 | schedule_timeout_interruptible(msecs_to_jiffies(50)); | 110 | schedule_timeout_interruptible(msecs_to_jiffies(50)); |
110 | 111 | ||
111 | if (kthread_should_stop()) { | 112 | if (kthread_should_stop()) { |
112 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n")); | 113 | jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__); |
113 | goto die; | 114 | goto die; |
114 | } | 115 | } |
115 | 116 | ||
@@ -126,26 +127,30 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
126 | 127 | ||
127 | switch(signr) { | 128 | switch(signr) { |
128 | case SIGSTOP: | 129 | case SIGSTOP: |
129 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n")); | 130 | jffs2_dbg(1, "%s(): SIGSTOP received\n", |
131 | __func__); | ||
130 | set_current_state(TASK_STOPPED); | 132 | set_current_state(TASK_STOPPED); |
131 | schedule(); | 133 | schedule(); |
132 | break; | 134 | break; |
133 | 135 | ||
134 | case SIGKILL: | 136 | case SIGKILL: |
135 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n")); | 137 | jffs2_dbg(1, "%s(): SIGKILL received\n", |
138 | __func__); | ||
136 | goto die; | 139 | goto die; |
137 | 140 | ||
138 | case SIGHUP: | 141 | case SIGHUP: |
139 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n")); | 142 | jffs2_dbg(1, "%s(): SIGHUP received\n", |
143 | __func__); | ||
140 | break; | 144 | break; |
141 | default: | 145 | default: |
142 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr)); | 146 | jffs2_dbg(1, "%s(): signal %ld received\n", |
147 | __func__, signr); | ||
143 | } | 148 | } |
144 | } | 149 | } |
145 | /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ | 150 | /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ |
146 | disallow_signal(SIGHUP); | 151 | disallow_signal(SIGHUP); |
147 | 152 | ||
148 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n")); | 153 | jffs2_dbg(1, "%s(): pass\n", __func__); |
149 | if (jffs2_garbage_collect_pass(c) == -ENOSPC) { | 154 | if (jffs2_garbage_collect_pass(c) == -ENOSPC) { |
150 | printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n"); | 155 | printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n"); |
151 | goto die; | 156 | goto die; |
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 96ed3c9ec3fc..be8e493d76bd 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -309,7 +309,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp) | |||
309 | comp->stat_compr_new_size=0; | 309 | comp->stat_compr_new_size=0; |
310 | comp->stat_compr_blocks=0; | 310 | comp->stat_compr_blocks=0; |
311 | comp->stat_decompr_blocks=0; | 311 | comp->stat_decompr_blocks=0; |
312 | D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name)); | 312 | jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name); |
313 | 313 | ||
314 | spin_lock(&jffs2_compressor_list_lock); | 314 | spin_lock(&jffs2_compressor_list_lock); |
315 | 315 | ||
@@ -332,9 +332,9 @@ out: | |||
332 | 332 | ||
333 | int jffs2_unregister_compressor(struct jffs2_compressor *comp) | 333 | int jffs2_unregister_compressor(struct jffs2_compressor *comp) |
334 | { | 334 | { |
335 | D2(struct jffs2_compressor *this;) | 335 | D2(struct jffs2_compressor *this); |
336 | 336 | ||
337 | D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name)); | 337 | jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name); |
338 | 338 | ||
339 | spin_lock(&jffs2_compressor_list_lock); | 339 | spin_lock(&jffs2_compressor_list_lock); |
340 | 340 | ||
@@ -377,17 +377,17 @@ int __init jffs2_compressors_init(void) | |||
377 | /* Setting default compression mode */ | 377 | /* Setting default compression mode */ |
378 | #ifdef CONFIG_JFFS2_CMODE_NONE | 378 | #ifdef CONFIG_JFFS2_CMODE_NONE |
379 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | 379 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
380 | D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");) | 380 | jffs2_dbg(1, "JFFS2: default compression mode: none\n"); |
381 | #else | 381 | #else |
382 | #ifdef CONFIG_JFFS2_CMODE_SIZE | 382 | #ifdef CONFIG_JFFS2_CMODE_SIZE |
383 | jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; | 383 | jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; |
384 | D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");) | 384 | jffs2_dbg(1, "JFFS2: default compression mode: size\n"); |
385 | #else | 385 | #else |
386 | #ifdef CONFIG_JFFS2_CMODE_FAVOURLZO | 386 | #ifdef CONFIG_JFFS2_CMODE_FAVOURLZO |
387 | jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO; | 387 | jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO; |
388 | D1(printk(KERN_INFO "JFFS2: default compression mode: favourlzo\n");) | 388 | jffs2_dbg(1, "JFFS2: default compression mode: favourlzo\n"); |
389 | #else | 389 | #else |
390 | D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");) | 390 | jffs2_dbg(1, "JFFS2: default compression mode: priority\n"); |
391 | #endif | 391 | #endif |
392 | #endif | 392 | #endif |
393 | #endif | 393 | #endif |
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 4e7a138745ec..40979c928751 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -45,13 +45,15 @@ static int __init alloc_workspaces(void) | |||
45 | if (!def_strm.workspace) | 45 | if (!def_strm.workspace) |
46 | return -ENOMEM; | 46 | return -ENOMEM; |
47 | 47 | ||
48 | D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL))); | 48 | jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n", |
49 | zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)); | ||
49 | inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); | 50 | inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); |
50 | if (!inf_strm.workspace) { | 51 | if (!inf_strm.workspace) { |
51 | vfree(def_strm.workspace); | 52 | vfree(def_strm.workspace); |
52 | return -ENOMEM; | 53 | return -ENOMEM; |
53 | } | 54 | } |
54 | D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize())); | 55 | jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n", |
56 | zlib_inflate_workspacesize()); | ||
55 | return 0; | 57 | return 0; |
56 | } | 58 | } |
57 | 59 | ||
@@ -91,13 +93,14 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
91 | while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { | 93 | while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { |
92 | def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); | 94 | def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); |
93 | def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out); | 95 | def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out); |
94 | D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", | 96 | jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n", |
95 | def_strm.avail_in, def_strm.avail_out)); | 97 | def_strm.avail_in, def_strm.avail_out); |
96 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); | 98 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); |
97 | D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", | 99 | jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", |
98 | def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); | 100 | def_strm.avail_in, def_strm.avail_out, |
101 | def_strm.total_in, def_strm.total_out); | ||
99 | if (ret != Z_OK) { | 102 | if (ret != Z_OK) { |
100 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); | 103 | jffs2_dbg(1, "deflate in loop returned %d\n", ret); |
101 | zlib_deflateEnd(&def_strm); | 104 | zlib_deflateEnd(&def_strm); |
102 | mutex_unlock(&deflate_mutex); | 105 | mutex_unlock(&deflate_mutex); |
103 | return -1; | 106 | return -1; |
@@ -109,20 +112,20 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
109 | zlib_deflateEnd(&def_strm); | 112 | zlib_deflateEnd(&def_strm); |
110 | 113 | ||
111 | if (ret != Z_STREAM_END) { | 114 | if (ret != Z_STREAM_END) { |
112 | D1(printk(KERN_DEBUG "final deflate returned %d\n", ret)); | 115 | jffs2_dbg(1, "final deflate returned %d\n", ret); |
113 | ret = -1; | 116 | ret = -1; |
114 | goto out; | 117 | goto out; |
115 | } | 118 | } |
116 | 119 | ||
117 | if (def_strm.total_out >= def_strm.total_in) { | 120 | if (def_strm.total_out >= def_strm.total_in) { |
118 | D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n", | 121 | jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n", |
119 | def_strm.total_in, def_strm.total_out)); | 122 | def_strm.total_in, def_strm.total_out); |
120 | ret = -1; | 123 | ret = -1; |
121 | goto out; | 124 | goto out; |
122 | } | 125 | } |
123 | 126 | ||
124 | D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n", | 127 | jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n", |
125 | def_strm.total_in, def_strm.total_out)); | 128 | def_strm.total_in, def_strm.total_out); |
126 | 129 | ||
127 | *dstlen = def_strm.total_out; | 130 | *dstlen = def_strm.total_out; |
128 | *sourcelen = def_strm.total_in; | 131 | *sourcelen = def_strm.total_in; |
@@ -155,13 +158,13 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
155 | ((data_in[0] & 0x0f) == Z_DEFLATED) && | 158 | ((data_in[0] & 0x0f) == Z_DEFLATED) && |
156 | !(((data_in[0]<<8) + data_in[1]) % 31)) { | 159 | !(((data_in[0]<<8) + data_in[1]) % 31)) { |
157 | 160 | ||
158 | D2(printk(KERN_DEBUG "inflate skipping adler32\n")); | 161 | jffs2_dbg(2, "inflate skipping adler32\n"); |
159 | wbits = -((data_in[0] >> 4) + 8); | 162 | wbits = -((data_in[0] >> 4) + 8); |
160 | inf_strm.next_in += 2; | 163 | inf_strm.next_in += 2; |
161 | inf_strm.avail_in -= 2; | 164 | inf_strm.avail_in -= 2; |
162 | } else { | 165 | } else { |
163 | /* Let this remain D1 for now -- it should never happen */ | 166 | /* Let this remain D1 for now -- it should never happen */ |
164 | D1(printk(KERN_DEBUG "inflate not skipping adler32\n")); | 167 | jffs2_dbg(1, "inflate not skipping adler32\n"); |
165 | } | 168 | } |
166 | 169 | ||
167 | 170 | ||
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index c4f8eef5ca68..7782c6355a5f 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -51,6 +51,7 @@ | |||
51 | * superseded by nicer dbg_xxx() macros... | 51 | * superseded by nicer dbg_xxx() macros... |
52 | */ | 52 | */ |
53 | #if CONFIG_JFFS2_FS_DEBUG > 0 | 53 | #if CONFIG_JFFS2_FS_DEBUG > 0 |
54 | #define DEBUG | ||
54 | #define D1(x) x | 55 | #define D1(x) x |
55 | #else | 56 | #else |
56 | #define D1(x) | 57 | #define D1(x) |
@@ -62,6 +63,12 @@ | |||
62 | #define D2(x) | 63 | #define D2(x) |
63 | #endif | 64 | #endif |
64 | 65 | ||
66 | #define jffs2_dbg(level, fmt, ...) \ | ||
67 | do { \ | ||
68 | if (CONFIG_JFFS2_FS_DEBUG >= level) \ | ||
69 | pr_debug(fmt, ##__VA_ARGS__); \ | ||
70 | } while (0) | ||
71 | |||
65 | /* The prefixes of JFFS2 messages */ | 72 | /* The prefixes of JFFS2 messages */ |
66 | #define JFFS2_DBG_PREFIX "[JFFS2 DBG]" | 73 | #define JFFS2_DBG_PREFIX "[JFFS2 DBG]" |
67 | #define JFFS2_ERR_PREFIX "JFFS2 error:" | 74 | #define JFFS2_ERR_PREFIX "JFFS2 error:" |
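
The debug.h hunk above is the core of the change: the new jffs2_dbg() macro keeps the old compile-time verbosity check against CONFIG_JFFS2_FS_DEBUG but routes the message through pr_debug(), which is also why DEBUG is now defined whenever CONFIG_JFFS2_FS_DEBUG > 0. A rough sketch of what a converted call does (assuming CONFIG_JFFS2_FS_DEBUG is configured to 1; the exact behaviour of pr_debug() depends on whether CONFIG_DYNAMIC_DEBUG is enabled):

    /* Expands to roughly:
     *     do {
     *         if (CONFIG_JFFS2_FS_DEBUG >= 1)
     *             pr_debug("%s(): sleeping...\n", __func__);
     *     } while (0)
     * With the config at 1 and DEBUG defined, pr_debug() is a printk at
     * KERN_DEBUG (or a dynamic-debug call site); with the config at 0 the
     * constant-false test lets the compiler drop the call entirely while
     * still type-checking the format arguments. */
    jffs2_dbg(1, "%s(): sleeping...\n", __func__);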
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 973ac5822bd7..01a07af0021d 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -79,7 +79,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
79 | uint32_t ino = 0; | 79 | uint32_t ino = 0; |
80 | struct inode *inode = NULL; | 80 | struct inode *inode = NULL; |
81 | 81 | ||
82 | D1(printk(KERN_DEBUG "jffs2_lookup()\n")); | 82 | jffs2_dbg(1, "jffs2_lookup()\n"); |
83 | 83 | ||
84 | if (target->d_name.len > JFFS2_MAX_NAME_LEN) | 84 | if (target->d_name.len > JFFS2_MAX_NAME_LEN) |
85 | return ERR_PTR(-ENAMETOOLONG); | 85 | return ERR_PTR(-ENAMETOOLONG); |
@@ -119,21 +119,22 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
119 | struct jffs2_full_dirent *fd; | 119 | struct jffs2_full_dirent *fd; |
120 | unsigned long offset, curofs; | 120 | unsigned long offset, curofs; |
121 | 121 | ||
122 | D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino)); | 122 | jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", |
123 | filp->f_path.dentry->d_inode->i_ino); | ||
123 | 124 | ||
124 | f = JFFS2_INODE_INFO(inode); | 125 | f = JFFS2_INODE_INFO(inode); |
125 | 126 | ||
126 | offset = filp->f_pos; | 127 | offset = filp->f_pos; |
127 | 128 | ||
128 | if (offset == 0) { | 129 | if (offset == 0) { |
129 | D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino)); | 130 | jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino); |
130 | if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) | 131 | if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) |
131 | goto out; | 132 | goto out; |
132 | offset++; | 133 | offset++; |
133 | } | 134 | } |
134 | if (offset == 1) { | 135 | if (offset == 1) { |
135 | unsigned long pino = parent_ino(filp->f_path.dentry); | 136 | unsigned long pino = parent_ino(filp->f_path.dentry); |
136 | D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino)); | 137 | jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino); |
137 | if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) | 138 | if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) |
138 | goto out; | 139 | goto out; |
139 | offset++; | 140 | offset++; |
@@ -146,16 +147,18 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
146 | curofs++; | 147 | curofs++; |
147 | /* First loop: curofs = 2; offset = 2 */ | 148 | /* First loop: curofs = 2; offset = 2 */ |
148 | if (curofs < offset) { | 149 | if (curofs < offset) { |
149 | D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", | 150 | jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", |
150 | fd->name, fd->ino, fd->type, curofs, offset)); | 151 | fd->name, fd->ino, fd->type, curofs, offset); |
151 | continue; | 152 | continue; |
152 | } | 153 | } |
153 | if (!fd->ino) { | 154 | if (!fd->ino) { |
154 | D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name)); | 155 | jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n", |
156 | fd->name); | ||
155 | offset++; | 157 | offset++; |
156 | continue; | 158 | continue; |
157 | } | 159 | } |
158 | D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type)); | 160 | jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n", |
161 | offset, fd->name, fd->ino, fd->type); | ||
159 | if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) | 162 | if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) |
160 | break; | 163 | break; |
161 | offset++; | 164 | offset++; |
@@ -184,12 +187,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, | |||
184 | 187 | ||
185 | c = JFFS2_SB_INFO(dir_i->i_sb); | 188 | c = JFFS2_SB_INFO(dir_i->i_sb); |
186 | 189 | ||
187 | D1(printk(KERN_DEBUG "jffs2_create()\n")); | 190 | jffs2_dbg(1, "%s()\n", __func__); |
188 | 191 | ||
189 | inode = jffs2_new_inode(dir_i, mode, ri); | 192 | inode = jffs2_new_inode(dir_i, mode, ri); |
190 | 193 | ||
191 | if (IS_ERR(inode)) { | 194 | if (IS_ERR(inode)) { |
192 | D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n")); | 195 | jffs2_dbg(1, "jffs2_new_inode() failed\n"); |
193 | jffs2_free_raw_inode(ri); | 196 | jffs2_free_raw_inode(ri); |
194 | return PTR_ERR(inode); | 197 | return PTR_ERR(inode); |
195 | } | 198 | } |
@@ -217,9 +220,9 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, | |||
217 | 220 | ||
218 | jffs2_free_raw_inode(ri); | 221 | jffs2_free_raw_inode(ri); |
219 | 222 | ||
220 | D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", | 223 | jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", |
221 | inode->i_ino, inode->i_mode, inode->i_nlink, | 224 | __func__, inode->i_ino, inode->i_mode, inode->i_nlink, |
222 | f->inocache->pino_nlink, inode->i_mapping->nrpages)); | 225 | f->inocache->pino_nlink, inode->i_mapping->nrpages); |
223 | 226 | ||
224 | d_instantiate(dentry, inode); | 227 | d_instantiate(dentry, inode); |
225 | unlock_new_inode(inode); | 228 | unlock_new_inode(inode); |
@@ -369,7 +372,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
369 | goto fail; | 372 | goto fail; |
370 | } | 373 | } |
371 | 374 | ||
372 | D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target)); | 375 | jffs2_dbg(1, "%s(): symlink's target '%s' cached\n", |
376 | __func__, (char *)f->target); | ||
373 | 377 | ||
374 | /* No data here. Only a metadata node, which will be | 378 | /* No data here. Only a metadata node, which will be |
375 | obsoleted by the first data write | 379 | obsoleted by the first data write |
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index eafb8d37a6fb..ee1cd98fdbf2 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -46,8 +46,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
46 | #else /* Linux */ | 46 | #else /* Linux */ |
47 | struct erase_info *instr; | 47 | struct erase_info *instr; |
48 | 48 | ||
49 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", | 49 | jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n", |
50 | jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | 50 | __func__, |
51 | jeb->offset, jeb->offset, jeb->offset + c->sector_size); | ||
51 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); | 52 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); |
52 | if (!instr) { | 53 | if (!instr) { |
53 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); | 54 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); |
@@ -84,7 +85,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
84 | 85 | ||
85 | if (ret == -ENOMEM || ret == -EAGAIN) { | 86 | if (ret == -ENOMEM || ret == -EAGAIN) { |
86 | /* Erase failed immediately. Refile it on the list */ | 87 | /* Erase failed immediately. Refile it on the list */ |
87 | D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); | 88 | jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", |
89 | jeb->offset, ret); | ||
88 | mutex_lock(&c->erase_free_sem); | 90 | mutex_lock(&c->erase_free_sem); |
89 | spin_lock(&c->erase_completion_lock); | 91 | spin_lock(&c->erase_completion_lock); |
90 | list_move(&jeb->list, &c->erase_pending_list); | 92 | list_move(&jeb->list, &c->erase_pending_list); |
@@ -125,13 +127,14 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) | |||
125 | 127 | ||
126 | work_done++; | 128 | work_done++; |
127 | if (!--count) { | 129 | if (!--count) { |
128 | D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); | 130 | jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n"); |
129 | goto done; | 131 | goto done; |
130 | } | 132 | } |
131 | 133 | ||
132 | } else if (!list_empty(&c->erase_pending_list)) { | 134 | } else if (!list_empty(&c->erase_pending_list)) { |
133 | jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); | 135 | jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); |
134 | D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset)); | 136 | jffs2_dbg(1, "Starting erase of pending block 0x%08x\n", |
137 | jeb->offset); | ||
135 | list_del(&jeb->list); | 138 | list_del(&jeb->list); |
136 | c->erasing_size += c->sector_size; | 139 | c->erasing_size += c->sector_size; |
137 | c->wasted_size -= jeb->wasted_size; | 140 | c->wasted_size -= jeb->wasted_size; |
@@ -159,13 +162,13 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) | |||
159 | spin_unlock(&c->erase_completion_lock); | 162 | spin_unlock(&c->erase_completion_lock); |
160 | mutex_unlock(&c->erase_free_sem); | 163 | mutex_unlock(&c->erase_free_sem); |
161 | done: | 164 | done: |
162 | D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); | 165 | jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n"); |
163 | return work_done; | 166 | return work_done; |
164 | } | 167 | } |
165 | 168 | ||
166 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 169 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
167 | { | 170 | { |
168 | D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); | 171 | jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset); |
169 | mutex_lock(&c->erase_free_sem); | 172 | mutex_lock(&c->erase_free_sem); |
170 | spin_lock(&c->erase_completion_lock); | 173 | spin_lock(&c->erase_completion_lock); |
171 | list_move_tail(&jeb->list, &c->erase_complete_list); | 174 | list_move_tail(&jeb->list, &c->erase_complete_list); |
@@ -269,8 +272,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
269 | return; | 272 | return; |
270 | } | 273 | } |
271 | 274 | ||
272 | D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", | 275 | jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", |
273 | jeb->offset, jeb->offset + c->sector_size, ic->ino)); | 276 | jeb->offset, jeb->offset + c->sector_size, ic->ino); |
274 | 277 | ||
275 | D2({ | 278 | D2({ |
276 | int i=0; | 279 | int i=0; |
@@ -310,7 +313,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
310 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 313 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
311 | { | 314 | { |
312 | struct jffs2_raw_node_ref *block, *ref; | 315 | struct jffs2_raw_node_ref *block, *ref; |
313 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); | 316 | jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n", |
317 | jeb->offset); | ||
314 | 318 | ||
315 | block = ref = jeb->first_node; | 319 | block = ref = jeb->first_node; |
316 | 320 | ||
@@ -342,12 +346,13 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
342 | &ebuf, NULL); | 346 | &ebuf, NULL); |
343 | if (ret != -EOPNOTSUPP) { | 347 | if (ret != -EOPNOTSUPP) { |
344 | if (ret) { | 348 | if (ret) { |
345 | D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); | 349 | jffs2_dbg(1, "MTD point failed %d\n", ret); |
346 | goto do_flash_read; | 350 | goto do_flash_read; |
347 | } | 351 | } |
348 | if (retlen < c->sector_size) { | 352 | if (retlen < c->sector_size) { |
349 | /* Don't muck about if it won't let us point to the whole erase sector */ | 353 | /* Don't muck about if it won't let us point to the whole erase sector */ |
350 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | 354 | jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", |
355 | retlen); | ||
351 | mtd_unpoint(c->mtd, jeb->offset, retlen); | 356 | mtd_unpoint(c->mtd, jeb->offset, retlen); |
352 | goto do_flash_read; | 357 | goto do_flash_read; |
353 | } | 358 | } |
@@ -372,7 +377,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
372 | return -EAGAIN; | 377 | return -EAGAIN; |
373 | } | 378 | } |
374 | 379 | ||
375 | D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); | 380 | jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset); |
376 | 381 | ||
377 | for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { | 382 | for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { |
378 | uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); | 383 | uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); |
@@ -422,7 +427,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
422 | } | 427 | } |
423 | 428 | ||
424 | /* Write the erase complete marker */ | 429 | /* Write the erase complete marker */ |
425 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); | 430 | jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset); |
426 | bad_offset = jeb->offset; | 431 | bad_offset = jeb->offset; |
427 | 432 | ||
428 | /* Cleanmarker in oob area or no cleanmarker at all ? */ | 433 | /* Cleanmarker in oob area or no cleanmarker at all ? */ |
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 61e6723535b9..5ffc3562ae23 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -85,7 +85,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) | |||
85 | unsigned char *pg_buf; | 85 | unsigned char *pg_buf; |
86 | int ret; | 86 | int ret; |
87 | 87 | ||
88 | D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT)); | 88 | jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", |
89 | __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); | ||
89 | 90 | ||
90 | BUG_ON(!PageLocked(pg)); | 91 | BUG_ON(!PageLocked(pg)); |
91 | 92 | ||
@@ -105,7 +106,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) | |||
105 | flush_dcache_page(pg); | 106 | flush_dcache_page(pg); |
106 | kunmap(pg); | 107 | kunmap(pg); |
107 | 108 | ||
108 | D2(printk(KERN_DEBUG "readpage finished\n")); | 109 | jffs2_dbg(2, "readpage finished\n"); |
109 | return ret; | 110 | return ret; |
110 | } | 111 | } |
111 | 112 | ||
@@ -144,7 +145,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
144 | return -ENOMEM; | 145 | return -ENOMEM; |
145 | *pagep = pg; | 146 | *pagep = pg; |
146 | 147 | ||
147 | D1(printk(KERN_DEBUG "jffs2_write_begin()\n")); | 148 | jffs2_dbg(1, "%s()\n", __func__); |
148 | 149 | ||
149 | if (pageofs > inode->i_size) { | 150 | if (pageofs > inode->i_size) { |
150 | /* Make new hole frag from old EOF to new page */ | 151 | /* Make new hole frag from old EOF to new page */ |
@@ -153,8 +154,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
153 | struct jffs2_full_dnode *fn; | 154 | struct jffs2_full_dnode *fn; |
154 | uint32_t alloc_len; | 155 | uint32_t alloc_len; |
155 | 156 | ||
156 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", | 157 | jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", |
157 | (unsigned int)inode->i_size, pageofs)); | 158 | (unsigned int)inode->i_size, pageofs); |
158 | 159 | ||
159 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, | 160 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, |
160 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 161 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
@@ -198,7 +199,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
198 | f->metadata = NULL; | 199 | f->metadata = NULL; |
199 | } | 200 | } |
200 | if (ret) { | 201 | if (ret) { |
201 | D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret)); | 202 | jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", |
203 | ret); | ||
202 | jffs2_mark_node_obsolete(c, fn->raw); | 204 | jffs2_mark_node_obsolete(c, fn->raw); |
203 | jffs2_free_full_dnode(fn); | 205 | jffs2_free_full_dnode(fn); |
204 | jffs2_complete_reservation(c); | 206 | jffs2_complete_reservation(c); |
@@ -222,7 +224,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
222 | if (ret) | 224 | if (ret) |
223 | goto out_page; | 225 | goto out_page; |
224 | } | 226 | } |
225 | D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags)); | 227 | jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); |
226 | return ret; | 228 | return ret; |
227 | 229 | ||
228 | out_page: | 230 | out_page: |
@@ -248,8 +250,9 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, | |||
248 | int ret = 0; | 250 | int ret = 0; |
249 | uint32_t writtenlen = 0; | 251 | uint32_t writtenlen = 0; |
250 | 252 | ||
251 | D1(printk(KERN_DEBUG "jffs2_write_end(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", | 253 | jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", |
252 | inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); | 254 | __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, |
255 | start, end, pg->flags); | ||
253 | 256 | ||
254 | /* We need to avoid deadlock with page_cache_read() in | 257 | /* We need to avoid deadlock with page_cache_read() in |
255 | jffs2_garbage_collect_pass(). So the page must be | 258 | jffs2_garbage_collect_pass(). So the page must be |
@@ -268,7 +271,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, | |||
268 | ri = jffs2_alloc_raw_inode(); | 271 | ri = jffs2_alloc_raw_inode(); |
269 | 272 | ||
270 | if (!ri) { | 273 | if (!ri) { |
271 | D1(printk(KERN_DEBUG "jffs2_write_end(): Allocation of raw inode failed\n")); | 274 | jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", |
275 | __func__); | ||
272 | unlock_page(pg); | 276 | unlock_page(pg); |
273 | page_cache_release(pg); | 277 | page_cache_release(pg); |
274 | return -ENOMEM; | 278 | return -ENOMEM; |
@@ -315,13 +319,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, | |||
315 | /* generic_file_write has written more to the page cache than we've | 319 | /* generic_file_write has written more to the page cache than we've |
316 | actually written to the medium. Mark the page !Uptodate so that | 320 | actually written to the medium. Mark the page !Uptodate so that |
317 | it gets reread */ | 321 | it gets reread */ |
318 | D1(printk(KERN_DEBUG "jffs2_write_end(): Not all bytes written. Marking page !uptodate\n")); | 322 | jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n", |
323 | __func__); | ||
319 | SetPageError(pg); | 324 | SetPageError(pg); |
320 | ClearPageUptodate(pg); | 325 | ClearPageUptodate(pg); |
321 | } | 326 | } |
322 | 327 | ||
323 | D1(printk(KERN_DEBUG "jffs2_write_end() returning %d\n", | 328 | jffs2_dbg(1, "%s() returning %d\n", |
324 | writtenlen > 0 ? writtenlen : ret)); | 329 | __func__, writtenlen > 0 ? writtenlen : ret); |
325 | unlock_page(pg); | 330 | unlock_page(pg); |
326 | page_cache_release(pg); | 331 | page_cache_release(pg); |
327 | return writtenlen > 0 ? writtenlen : ret; | 332 | return writtenlen > 0 ? writtenlen : ret; |
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 2e0123867cb1..9a8c97c264c2 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -39,7 +39,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
39 | int ret; | 39 | int ret; |
40 | int alloc_type = ALLOC_NORMAL; | 40 | int alloc_type = ALLOC_NORMAL; |
41 | 41 | ||
42 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); | 42 | jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino); |
43 | 43 | ||
44 | /* Special cases - we don't want more than one data node | 44 | /* Special cases - we don't want more than one data node |
45 | for these types on the medium at any time. So setattr | 45 | for these types on the medium at any time. So setattr |
@@ -50,7 +50,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
50 | /* For these, we don't actually need to read the old node */ | 50 | /* For these, we don't actually need to read the old node */ |
51 | mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); | 51 | mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); |
52 | mdata = (char *)&dev; | 52 | mdata = (char *)&dev; |
53 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); | 53 | jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", |
54 | __func__, mdatalen); | ||
54 | } else if (S_ISLNK(inode->i_mode)) { | 55 | } else if (S_ISLNK(inode->i_mode)) { |
55 | mutex_lock(&f->sem); | 56 | mutex_lock(&f->sem); |
56 | mdatalen = f->metadata->size; | 57 | mdatalen = f->metadata->size; |
@@ -66,7 +67,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
66 | return ret; | 67 | return ret; |
67 | } | 68 | } |
68 | mutex_unlock(&f->sem); | 69 | mutex_unlock(&f->sem); |
69 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); | 70 | jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n", |
71 | __func__, mdatalen); | ||
70 | } | 72 | } |
71 | 73 | ||
72 | ri = jffs2_alloc_raw_inode(); | 74 | ri = jffs2_alloc_raw_inode(); |
@@ -233,7 +235,8 @@ void jffs2_evict_inode (struct inode *inode) | |||
233 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | 235 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); |
234 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 236 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
235 | 237 | ||
236 | D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); | 238 | jffs2_dbg(1, "%s(): ino #%lu mode %o\n", |
239 | __func__, inode->i_ino, inode->i_mode); | ||
237 | truncate_inode_pages(&inode->i_data, 0); | 240 | truncate_inode_pages(&inode->i_data, 0); |
238 | end_writeback(inode); | 241 | end_writeback(inode); |
239 | jffs2_do_clear_inode(c, f); | 242 | jffs2_do_clear_inode(c, f); |
@@ -249,7 +252,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) | |||
249 | dev_t rdev = 0; | 252 | dev_t rdev = 0; |
250 | int ret; | 253 | int ret; |
251 | 254 | ||
252 | D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino)); | 255 | jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino); |
253 | 256 | ||
254 | inode = iget_locked(sb, ino); | 257 | inode = iget_locked(sb, ino); |
255 | if (!inode) | 258 | if (!inode) |
@@ -320,7 +323,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) | |||
320 | printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); | 323 | printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); |
321 | goto error_io; | 324 | goto error_io; |
322 | } | 325 | } |
323 | D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); | 326 | jffs2_dbg(1, "Reading device numbers from flash\n"); |
324 | ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); | 327 | ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); |
325 | if (ret < 0) { | 328 | if (ret < 0) { |
326 | /* Eep */ | 329 | /* Eep */ |
@@ -344,7 +347,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) | |||
344 | 347 | ||
345 | mutex_unlock(&f->sem); | 348 | mutex_unlock(&f->sem); |
346 | 349 | ||
347 | D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); | 350 | jffs2_dbg(1, "jffs2_read_inode() returning\n"); |
348 | unlock_new_inode(inode); | 351 | unlock_new_inode(inode); |
349 | return inode; | 352 | return inode; |
350 | 353 | ||
@@ -362,11 +365,13 @@ void jffs2_dirty_inode(struct inode *inode, int flags) | |||
362 | struct iattr iattr; | 365 | struct iattr iattr; |
363 | 366 | ||
364 | if (!(inode->i_state & I_DIRTY_DATASYNC)) { | 367 | if (!(inode->i_state & I_DIRTY_DATASYNC)) { |
365 | D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino)); | 368 | jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", |
369 | __func__, inode->i_ino); | ||
366 | return; | 370 | return; |
367 | } | 371 | } |
368 | 372 | ||
369 | D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino)); | 373 | jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n", |
374 | __func__, inode->i_ino); | ||
370 | 375 | ||
371 | iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; | 376 | iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; |
372 | iattr.ia_mode = inode->i_mode; | 377 | iattr.ia_mode = inode->i_mode; |
@@ -414,7 +419,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r | |||
414 | struct jffs2_inode_info *f; | 419 | struct jffs2_inode_info *f; |
415 | int ret; | 420 | int ret; |
416 | 421 | ||
417 | D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); | 422 | jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n", |
423 | __func__, dir_i->i_ino, mode); | ||
418 | 424 | ||
419 | c = JFFS2_SB_INFO(sb); | 425 | c = JFFS2_SB_INFO(sb); |
420 | 426 | ||
@@ -550,17 +556,17 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
550 | if ((ret = jffs2_do_mount_fs(c))) | 556 | if ((ret = jffs2_do_mount_fs(c))) |
551 | goto out_inohash; | 557 | goto out_inohash; |
552 | 558 | ||
553 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); | 559 | jffs2_dbg(1, "%s(): Getting root inode\n", __func__); |
554 | root_i = jffs2_iget(sb, 1); | 560 | root_i = jffs2_iget(sb, 1); |
555 | if (IS_ERR(root_i)) { | 561 | if (IS_ERR(root_i)) { |
556 | D1(printk(KERN_WARNING "get root inode failed\n")); | 562 | jffs2_dbg(1, "get root inode failed\n"); |
557 | ret = PTR_ERR(root_i); | 563 | ret = PTR_ERR(root_i); |
558 | goto out_root; | 564 | goto out_root; |
559 | } | 565 | } |
560 | 566 | ||
561 | ret = -ENOMEM; | 567 | ret = -ENOMEM; |
562 | 568 | ||
563 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); | 569 | jffs2_dbg(1, "%s(): d_alloc_root()\n", __func__); |
564 | sb->s_root = d_alloc_root(root_i); | 570 | sb->s_root = d_alloc_root(root_i); |
565 | if (!sb->s_root) | 571 | if (!sb->s_root) |
566 | goto out_root_i; | 572 | goto out_root_i; |
@@ -620,20 +626,21 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
620 | */ | 626 | */ |
621 | inode = ilookup(OFNI_BS_2SFFJ(c), inum); | 627 | inode = ilookup(OFNI_BS_2SFFJ(c), inum); |
622 | if (!inode) { | 628 | if (!inode) { |
623 | D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n", | 629 | jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n", |
624 | inum)); | 630 | inum); |
625 | 631 | ||
626 | spin_lock(&c->inocache_lock); | 632 | spin_lock(&c->inocache_lock); |
627 | ic = jffs2_get_ino_cache(c, inum); | 633 | ic = jffs2_get_ino_cache(c, inum); |
628 | if (!ic) { | 634 | if (!ic) { |
629 | D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum)); | 635 | jffs2_dbg(1, "Inode cache for ino #%u is gone\n", |
636 | inum); | ||
630 | spin_unlock(&c->inocache_lock); | 637 | spin_unlock(&c->inocache_lock); |
631 | return NULL; | 638 | return NULL; |
632 | } | 639 | } |
633 | if (ic->state != INO_STATE_CHECKEDABSENT) { | 640 | if (ic->state != INO_STATE_CHECKEDABSENT) { |
634 | /* Wait for progress. Don't just loop */ | 641 | /* Wait for progress. Don't just loop */ |
635 | D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n", | 642 | jffs2_dbg(1, "Waiting for ino #%u in state %d\n", |
636 | ic->ino, ic->state)); | 643 | ic->ino, ic->state); |
637 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 644 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
638 | } else { | 645 | } else { |
639 | spin_unlock(&c->inocache_lock); | 646 | spin_unlock(&c->inocache_lock); |
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 31dce611337c..85e703a29361 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -51,44 +51,44 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c) | |||
51 | number of free blocks is low. */ | 51 | number of free blocks is low. */ |
52 | again: | 52 | again: |
53 | if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { | 53 | if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { |
54 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); | 54 | jffs2_dbg(1, "Picking block from bad_used_list to GC next\n"); |
55 | nextlist = &c->bad_used_list; | 55 | nextlist = &c->bad_used_list; |
56 | } else if (n < 50 && !list_empty(&c->erasable_list)) { | 56 | } else if (n < 50 && !list_empty(&c->erasable_list)) { |
57 | /* Note that most of them will have gone directly to be erased. | 57 | /* Note that most of them will have gone directly to be erased. |
58 | So don't favour the erasable_list _too_ much. */ | 58 | So don't favour the erasable_list _too_ much. */ |
59 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); | 59 | jffs2_dbg(1, "Picking block from erasable_list to GC next\n"); |
60 | nextlist = &c->erasable_list; | 60 | nextlist = &c->erasable_list; |
61 | } else if (n < 110 && !list_empty(&c->very_dirty_list)) { | 61 | } else if (n < 110 && !list_empty(&c->very_dirty_list)) { |
62 | /* Most of the time, pick one off the very_dirty list */ | 62 | /* Most of the time, pick one off the very_dirty list */ |
63 | D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n")); | 63 | jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n"); |
64 | nextlist = &c->very_dirty_list; | 64 | nextlist = &c->very_dirty_list; |
65 | } else if (n < 126 && !list_empty(&c->dirty_list)) { | 65 | } else if (n < 126 && !list_empty(&c->dirty_list)) { |
66 | D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n")); | 66 | jffs2_dbg(1, "Picking block from dirty_list to GC next\n"); |
67 | nextlist = &c->dirty_list; | 67 | nextlist = &c->dirty_list; |
68 | } else if (!list_empty(&c->clean_list)) { | 68 | } else if (!list_empty(&c->clean_list)) { |
69 | D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n")); | 69 | jffs2_dbg(1, "Picking block from clean_list to GC next\n"); |
70 | nextlist = &c->clean_list; | 70 | nextlist = &c->clean_list; |
71 | } else if (!list_empty(&c->dirty_list)) { | 71 | } else if (!list_empty(&c->dirty_list)) { |
72 | D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n")); | 72 | jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n"); |
73 | 73 | ||
74 | nextlist = &c->dirty_list; | 74 | nextlist = &c->dirty_list; |
75 | } else if (!list_empty(&c->very_dirty_list)) { | 75 | } else if (!list_empty(&c->very_dirty_list)) { |
76 | D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n")); | 76 | jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"); |
77 | nextlist = &c->very_dirty_list; | 77 | nextlist = &c->very_dirty_list; |
78 | } else if (!list_empty(&c->erasable_list)) { | 78 | } else if (!list_empty(&c->erasable_list)) { |
79 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n")); | 79 | jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"); |
80 | 80 | ||
81 | nextlist = &c->erasable_list; | 81 | nextlist = &c->erasable_list; |
82 | } else if (!list_empty(&c->erasable_pending_wbuf_list)) { | 82 | } else if (!list_empty(&c->erasable_pending_wbuf_list)) { |
83 | /* There are blocks are wating for the wbuf sync */ | 83 | /* There are blocks are wating for the wbuf sync */ |
84 | D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n")); | 84 | jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"); |
85 | spin_unlock(&c->erase_completion_lock); | 85 | spin_unlock(&c->erase_completion_lock); |
86 | jffs2_flush_wbuf_pad(c); | 86 | jffs2_flush_wbuf_pad(c); |
87 | spin_lock(&c->erase_completion_lock); | 87 | spin_lock(&c->erase_completion_lock); |
88 | goto again; | 88 | goto again; |
89 | } else { | 89 | } else { |
90 | /* Eep. All were empty */ | 90 | /* Eep. All were empty */ |
91 | D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n")); | 91 | jffs2_dbg(1, "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"); |
92 | return NULL; | 92 | return NULL; |
93 | } | 93 | } |
94 | 94 | ||
@@ -103,7 +103,8 @@ again: | |||
103 | 103 | ||
104 | /* Have we accidentally picked a clean block with wasted space ? */ | 104 | /* Have we accidentally picked a clean block with wasted space ? */ |
105 | if (ret->wasted_size) { | 105 | if (ret->wasted_size) { |
106 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); | 106 | jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n", |
107 | ret->wasted_size); | ||
107 | ret->dirty_size += ret->wasted_size; | 108 | ret->dirty_size += ret->wasted_size; |
108 | c->wasted_size -= ret->wasted_size; | 109 | c->wasted_size -= ret->wasted_size; |
109 | c->dirty_size += ret->wasted_size; | 110 | c->dirty_size += ret->wasted_size; |
@@ -163,8 +164,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
163 | } | 164 | } |
164 | 165 | ||
165 | if (!ic->pino_nlink) { | 166 | if (!ic->pino_nlink) { |
166 | D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n", | 167 | jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", |
167 | ic->ino)); | 168 | ic->ino); |
168 | spin_unlock(&c->inocache_lock); | 169 | spin_unlock(&c->inocache_lock); |
169 | jffs2_xattr_delete_inode(c, ic); | 170 | jffs2_xattr_delete_inode(c, ic); |
170 | continue; | 171 | continue; |
@@ -172,7 +173,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
172 | switch(ic->state) { | 173 | switch(ic->state) { |
173 | case INO_STATE_CHECKEDABSENT: | 174 | case INO_STATE_CHECKEDABSENT: |
174 | case INO_STATE_PRESENT: | 175 | case INO_STATE_PRESENT: |
175 | D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino)); | 176 | jffs2_dbg(1, "Skipping ino #%u already checked\n", |
177 | ic->ino); | ||
176 | spin_unlock(&c->inocache_lock); | 178 | spin_unlock(&c->inocache_lock); |
177 | continue; | 179 | continue; |
178 | 180 | ||
@@ -186,7 +188,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
186 | /* We need to wait for it to finish, lest we move on | 188 | /* We need to wait for it to finish, lest we move on |
187 | and trigger the BUG() above while we haven't yet | 189 | and trigger the BUG() above while we haven't yet |
188 | finished checking all its nodes */ | 190 | finished checking all its nodes */ |
189 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); | 191 | jffs2_dbg(1, "Waiting for ino #%u to finish reading\n", |
192 | ic->ino); | ||
190 | /* We need to come back again for the _same_ inode. We've | 193 | /* We need to come back again for the _same_ inode. We've |
191 | made no progress in this case, but that should be OK */ | 194 | made no progress in this case, but that should be OK */ |
192 | c->checked_ino--; | 195 | c->checked_ino--; |
@@ -204,7 +207,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
204 | ic->state = INO_STATE_CHECKING; | 207 | ic->state = INO_STATE_CHECKING; |
205 | spin_unlock(&c->inocache_lock); | 208 | spin_unlock(&c->inocache_lock); |
206 | 209 | ||
207 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino)); | 210 | jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n", |
211 | __func__, ic->ino); | ||
208 | 212 | ||
209 | ret = jffs2_do_crccheck_inode(c, ic); | 213 | ret = jffs2_do_crccheck_inode(c, ic); |
210 | if (ret) | 214 | if (ret) |
@@ -220,11 +224,11 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
220 | !list_empty(&c->erase_pending_list)) { | 224 | !list_empty(&c->erase_pending_list)) { |
221 | spin_unlock(&c->erase_completion_lock); | 225 | spin_unlock(&c->erase_completion_lock); |
222 | mutex_unlock(&c->alloc_sem); | 226 | mutex_unlock(&c->alloc_sem); |
223 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n")); | 227 | jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__); |
224 | if (jffs2_erase_pending_blocks(c, 1)) | 228 | if (jffs2_erase_pending_blocks(c, 1)) |
225 | return 0; | 229 | return 0; |
226 | 230 | ||
227 | D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n")); | 231 | jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); |
228 | spin_lock(&c->erase_completion_lock); | 232 | spin_lock(&c->erase_completion_lock); |
229 | mutex_lock(&c->alloc_sem); | 233 | mutex_lock(&c->alloc_sem); |
230 | } | 234 | } |
@@ -242,13 +246,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
242 | mutex_unlock(&c->alloc_sem); | 246 | mutex_unlock(&c->alloc_sem); |
243 | return -EAGAIN; | 247 | return -EAGAIN; |
244 | } | 248 | } |
245 | D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n")); | 249 | jffs2_dbg(1, "jffs2: Couldn't find erase block to garbage collect!\n"); |
246 | spin_unlock(&c->erase_completion_lock); | 250 | spin_unlock(&c->erase_completion_lock); |
247 | mutex_unlock(&c->alloc_sem); | 251 | mutex_unlock(&c->alloc_sem); |
248 | return -EIO; | 252 | return -EIO; |
249 | } | 253 | } |
250 | 254 | ||
251 | D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size)); | 255 | jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", |
256 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size); | ||
252 | D1(if (c->nextblock) | 257 | D1(if (c->nextblock) |
253 | printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); | 258 | printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); |
254 | 259 | ||
@@ -261,7 +266,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
261 | gcblock_dirty = jeb->dirty_size; | 266 | gcblock_dirty = jeb->dirty_size; |
262 | 267 | ||
263 | while(ref_obsolete(raw)) { | 268 | while(ref_obsolete(raw)) { |
264 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); | 269 | jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n", |
270 | ref_offset(raw)); | ||
265 | raw = ref_next(raw); | 271 | raw = ref_next(raw); |
266 | if (unlikely(!raw)) { | 272 | if (unlikely(!raw)) { |
267 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); | 273 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); |
@@ -275,7 +281,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
275 | } | 281 | } |
276 | jeb->gc_node = raw; | 282 | jeb->gc_node = raw; |
277 | 283 | ||
278 | D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw))); | 284 | jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n", |
285 | ref_offset(raw)); | ||
279 | 286 | ||
280 | if (!raw->next_in_ino) { | 287 | if (!raw->next_in_ino) { |
281 | /* Inode-less node. Clean marker, snapshot or something like that */ | 288 | /* Inode-less node. Clean marker, snapshot or something like that */ |
@@ -316,7 +323,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
316 | 323 | ||
317 | spin_unlock(&c->erase_completion_lock); | 324 | spin_unlock(&c->erase_completion_lock); |
318 | 325 | ||
319 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino)); | 326 | jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", |
327 | __func__, jeb->offset, ref_offset(raw), ref_flags(raw), | ||
328 | ic->ino); | ||
320 | 329 | ||
321 | /* Three possibilities: | 330 | /* Three possibilities: |
322 | 1. Inode is already in-core. We must iget it and do proper | 331 | 1. Inode is already in-core. We must iget it and do proper |
@@ -336,8 +345,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
336 | if (ref_flags(raw) == REF_PRISTINE) | 345 | if (ref_flags(raw) == REF_PRISTINE) |
337 | ic->state = INO_STATE_GC; | 346 | ic->state = INO_STATE_GC; |
338 | else { | 347 | else { |
339 | D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", | 348 | jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", |
340 | ic->ino)); | 349 | ic->ino); |
341 | } | 350 | } |
342 | break; | 351 | break; |
343 | 352 | ||
@@ -367,8 +376,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
367 | drop the alloc_sem before sleeping. */ | 376 | drop the alloc_sem before sleeping. */ |
368 | 377 | ||
369 | mutex_unlock(&c->alloc_sem); | 378 | mutex_unlock(&c->alloc_sem); |
370 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", | 379 | jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n", |
371 | ic->ino, ic->state)); | 380 | __func__, ic->ino, ic->state); |
372 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 381 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
373 | /* And because we dropped the alloc_sem we must start again from the | 382 | /* And because we dropped the alloc_sem we must start again from the |
374 | beginning. Ponder chance of livelock here -- we're returning success | 383 | beginning. Ponder chance of livelock here -- we're returning success |
@@ -445,7 +454,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
445 | 454 | ||
446 | eraseit: | 455 | eraseit: |
447 | if (c->gcblock && !c->gcblock->used_size) { | 456 | if (c->gcblock && !c->gcblock->used_size) { |
448 | D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset)); | 457 | jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", |
458 | c->gcblock->offset); | ||
449 | /* We're GC'ing an empty block? */ | 459 | /* We're GC'ing an empty block? */ |
450 | list_add_tail(&c->gcblock->list, &c->erase_pending_list); | 460 | list_add_tail(&c->gcblock->list, &c->erase_pending_list); |
451 | c->gcblock = NULL; | 461 | c->gcblock = NULL; |
@@ -475,12 +485,12 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
475 | 485 | ||
476 | if (c->gcblock != jeb) { | 486 | if (c->gcblock != jeb) { |
477 | spin_unlock(&c->erase_completion_lock); | 487 | spin_unlock(&c->erase_completion_lock); |
478 | D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n")); | 488 | jffs2_dbg(1, "GC block is no longer gcblock. Restart\n"); |
479 | goto upnout; | 489 | goto upnout; |
480 | } | 490 | } |
481 | if (ref_obsolete(raw)) { | 491 | if (ref_obsolete(raw)) { |
482 | spin_unlock(&c->erase_completion_lock); | 492 | spin_unlock(&c->erase_completion_lock); |
483 | D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n")); | 493 | jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n"); |
484 | /* They'll call again */ | 494 | /* They'll call again */ |
485 | goto upnout; | 495 | goto upnout; |
486 | } | 496 | } |
@@ -562,7 +572,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
562 | uint32_t crc, rawlen; | 572 | uint32_t crc, rawlen; |
563 | int retried = 0; | 573 | int retried = 0; |
564 | 574 | ||
565 | D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); | 575 | jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n", |
576 | ref_offset(raw)); | ||
566 | 577 | ||
567 | alloclen = rawlen = ref_totlen(c, c->gcblock, raw); | 578 | alloclen = rawlen = ref_totlen(c, c->gcblock, raw); |
568 | 579 | ||
@@ -671,7 +682,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
671 | 682 | ||
672 | retried = 1; | 683 | retried = 1; |
673 | 684 | ||
674 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); | 685 | jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n"); |
675 | 686 | ||
676 | jffs2_dbg_acct_sanity_check(c,jeb); | 687 | jffs2_dbg_acct_sanity_check(c,jeb); |
677 | jffs2_dbg_acct_paranoia_check(c, jeb); | 688 | jffs2_dbg_acct_paranoia_check(c, jeb); |
@@ -681,14 +692,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
681 | it is only an upper estimation */ | 692 | it is only an upper estimation */ |
682 | 693 | ||
683 | if (!ret) { | 694 | if (!ret) { |
684 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); | 695 | jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", |
696 | phys_ofs); | ||
685 | 697 | ||
686 | jffs2_dbg_acct_sanity_check(c,jeb); | 698 | jffs2_dbg_acct_sanity_check(c,jeb); |
687 | jffs2_dbg_acct_paranoia_check(c, jeb); | 699 | jffs2_dbg_acct_paranoia_check(c, jeb); |
688 | 700 | ||
689 | goto retry; | 701 | goto retry; |
690 | } | 702 | } |
691 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 703 | jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", |
704 | ret); | ||
692 | } | 705 | } |
693 | 706 | ||
694 | if (!ret) | 707 | if (!ret) |
@@ -698,7 +711,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
698 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); | 711 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); |
699 | 712 | ||
700 | jffs2_mark_node_obsolete(c, raw); | 713 | jffs2_mark_node_obsolete(c, raw); |
701 | D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); | 714 | jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", |
715 | ref_offset(raw)); | ||
702 | 716 | ||
703 | out_node: | 717 | out_node: |
704 | kfree(node); | 718 | kfree(node); |
@@ -725,7 +739,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
725 | /* For these, we don't actually need to read the old node */ | 739 | /* For these, we don't actually need to read the old node */ |
726 | mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); | 740 | mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); |
727 | mdata = (char *)&dev; | 741 | mdata = (char *)&dev; |
728 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); | 742 | jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", |
743 | __func__, mdatalen); | ||
729 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { | 744 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { |
730 | mdatalen = fn->size; | 745 | mdatalen = fn->size; |
731 | mdata = kmalloc(fn->size, GFP_KERNEL); | 746 | mdata = kmalloc(fn->size, GFP_KERNEL); |
@@ -739,7 +754,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
739 | kfree(mdata); | 754 | kfree(mdata); |
740 | return ret; | 755 | return ret; |
741 | } | 756 | } |
742 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen)); | 757 | jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
758 | __func__, mdatalen); | ||
743 | 759 | ||
744 | } | 760 | } |
745 | 761 | ||
@@ -887,7 +903,8 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
887 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) | 903 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) |
888 | continue; | 904 | continue; |
889 | 905 | ||
890 | D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw))); | 906 | jffs2_dbg(1, "Check potential deletion dirent at %08x\n", |
907 | ref_offset(raw)); | ||
891 | 908 | ||
892 | /* This is an obsolete node belonging to the same directory, and it's of the right | 909 | /* This is an obsolete node belonging to the same directory, and it's of the right |
893 | length. We need to take a closer look...*/ | 910 | length. We need to take a closer look...*/ |
@@ -923,8 +940,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
923 | a new deletion dirent to replace it */ | 940 | a new deletion dirent to replace it */ |
924 | mutex_unlock(&c->erase_free_sem); | 941 | mutex_unlock(&c->erase_free_sem); |
925 | 942 | ||
926 | D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", | 943 | jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", |
927 | ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino))); | 944 | ref_offset(fd->raw), fd->name, |
945 | ref_offset(raw), je32_to_cpu(rd->ino)); | ||
928 | kfree(rd); | 946 | kfree(rd); |
929 | 947 | ||
930 | return jffs2_garbage_collect_dirent(c, jeb, f, fd); | 948 | return jffs2_garbage_collect_dirent(c, jeb, f, fd); |
@@ -964,8 +982,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
964 | uint32_t alloclen, ilen; | 982 | uint32_t alloclen, ilen; |
965 | int ret; | 983 | int ret; |
966 | 984 | ||
967 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", | 985 | jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", |
968 | f->inocache->ino, start, end)); | 986 | f->inocache->ino, start, end); |
969 | 987 | ||
970 | memset(&ri, 0, sizeof(ri)); | 988 | memset(&ri, 0, sizeof(ri)); |
971 | 989 | ||
@@ -1117,8 +1135,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1117 | 1135 | ||
1118 | memset(&ri, 0, sizeof(ri)); | 1136 | memset(&ri, 0, sizeof(ri)); |
1119 | 1137 | ||
1120 | D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", | 1138 | jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", |
1121 | f->inocache->ino, start, end)); | 1139 | f->inocache->ino, start, end); |
1122 | 1140 | ||
1123 | orig_end = end; | 1141 | orig_end = end; |
1124 | orig_start = start; | 1142 | orig_start = start; |
@@ -1149,15 +1167,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1149 | /* If the previous frag doesn't even reach the beginning, there's | 1167 | /* If the previous frag doesn't even reach the beginning, there's |
1150 | excessive fragmentation. Just merge. */ | 1168 | excessive fragmentation. Just merge. */ |
1151 | if (frag->ofs > min) { | 1169 | if (frag->ofs > min) { |
1152 | D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n", | 1170 | jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n", |
1153 | frag->ofs, frag->ofs+frag->size)); | 1171 | frag->ofs, frag->ofs+frag->size); |
1154 | start = frag->ofs; | 1172 | start = frag->ofs; |
1155 | continue; | 1173 | continue; |
1156 | } | 1174 | } |
1157 | /* OK. This frag holds the first byte of the page. */ | 1175 | /* OK. This frag holds the first byte of the page. */ |
1158 | if (!frag->node || !frag->node->raw) { | 1176 | if (!frag->node || !frag->node->raw) { |
1159 | D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", | 1177 | jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", |
1160 | frag->ofs, frag->ofs+frag->size)); | 1178 | frag->ofs, frag->ofs+frag->size); |
1161 | break; | 1179 | break; |
1162 | } else { | 1180 | } else { |
1163 | 1181 | ||
@@ -1171,19 +1189,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1171 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; | 1189 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; |
1172 | 1190 | ||
1173 | if (jeb == c->gcblock) { | 1191 | if (jeb == c->gcblock) { |
1174 | D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", | 1192 | jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", |
1175 | frag->ofs, frag->ofs+frag->size, ref_offset(raw))); | 1193 | frag->ofs, |
1194 | frag->ofs + frag->size, | ||
1195 | ref_offset(raw)); | ||
1176 | start = frag->ofs; | 1196 | start = frag->ofs; |
1177 | break; | 1197 | break; |
1178 | } | 1198 | } |
1179 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { | 1199 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { |
1180 | D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", | 1200 | jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", |
1181 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1201 | frag->ofs, |
1202 | frag->ofs + frag->size, | ||
1203 | jeb->offset); | ||
1182 | break; | 1204 | break; |
1183 | } | 1205 | } |
1184 | 1206 | ||
1185 | D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", | 1207 | jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", |
1186 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1208 | frag->ofs, |
1209 | frag->ofs + frag->size, | ||
1210 | jeb->offset); | ||
1187 | start = frag->ofs; | 1211 | start = frag->ofs; |
1188 | break; | 1212 | break; |
1189 | } | 1213 | } |
@@ -1199,15 +1223,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1199 | /* If the previous frag doesn't even reach the beginning, there's lots | 1223 | /* If the previous frag doesn't even reach the beginning, there's lots |
1200 | of fragmentation. Just merge. */ | 1224 | of fragmentation. Just merge. */ |
1201 | if (frag->ofs+frag->size < max) { | 1225 | if (frag->ofs+frag->size < max) { |
1202 | D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n", | 1226 | jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n", |
1203 | frag->ofs, frag->ofs+frag->size)); | 1227 | frag->ofs, frag->ofs+frag->size); |
1204 | end = frag->ofs + frag->size; | 1228 | end = frag->ofs + frag->size; |
1205 | continue; | 1229 | continue; |
1206 | } | 1230 | } |
1207 | 1231 | ||
1208 | if (!frag->node || !frag->node->raw) { | 1232 | if (!frag->node || !frag->node->raw) { |
1209 | D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", | 1233 | jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", |
1210 | frag->ofs, frag->ofs+frag->size)); | 1234 | frag->ofs, frag->ofs+frag->size); |
1211 | break; | 1235 | break; |
1212 | } else { | 1236 | } else { |
1213 | 1237 | ||
@@ -1221,25 +1245,31 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1221 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; | 1245 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; |
1222 | 1246 | ||
1223 | if (jeb == c->gcblock) { | 1247 | if (jeb == c->gcblock) { |
1224 | D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", | 1248 | jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", |
1225 | frag->ofs, frag->ofs+frag->size, ref_offset(raw))); | 1249 | frag->ofs, |
1250 | frag->ofs + frag->size, | ||
1251 | ref_offset(raw)); | ||
1226 | end = frag->ofs + frag->size; | 1252 | end = frag->ofs + frag->size; |
1227 | break; | 1253 | break; |
1228 | } | 1254 | } |
1229 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { | 1255 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { |
1230 | D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", | 1256 | jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", |
1231 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1257 | frag->ofs, |
1258 | frag->ofs + frag->size, | ||
1259 | jeb->offset); | ||
1232 | break; | 1260 | break; |
1233 | } | 1261 | } |
1234 | 1262 | ||
1235 | D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", | 1263 | jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", |
1236 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1264 | frag->ofs, |
1265 | frag->ofs + frag->size, | ||
1266 | jeb->offset); | ||
1237 | end = frag->ofs + frag->size; | 1267 | end = frag->ofs + frag->size; |
1238 | break; | 1268 | break; |
1239 | } | 1269 | } |
1240 | } | 1270 | } |
1241 | D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", | 1271 | jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", |
1242 | orig_start, orig_end, start, end)); | 1272 | orig_start, orig_end, start, end); |
1243 | 1273 | ||
1244 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); | 1274 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); |
1245 | BUG_ON(end < orig_end); | 1275 | BUG_ON(end < orig_end); |
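Editorial note on the gc.c hunks above: the straight D1(printk(KERN_DEBUG ...)) calls become jffs2_dbg(1, ...), but the statement-style uses are deliberately left alone, namely the D1(if (c->nextblock) printk(...)) block near the top of the file and the D1(BUG_ON(...)) at the end of jffs2_garbage_collect_dnode(). That follows from the shape of the legacy wrappers, which take an arbitrary statement rather than a format string. As a rough sketch only (the real definitions live in fs/jffs2/debug.h, which this part of the listing does not show), the old wrappers amount to:

	/* Sketch of the legacy statement wrappers, keyed off the existing
	 * CONFIG_JFFS2_FS_DEBUG verbosity value; see fs/jffs2/debug.h for
	 * the authoritative definitions. */
	#if CONFIG_JFFS2_FS_DEBUG > 0
	#define D1(x) x		/* compile the wrapped statement in */
	#else
	#define D1(x)		/* compile it out entirely */
	#endif

	#if CONFIG_JFFS2_FS_DEBUG > 1
	#define D2(x) x
	#else
	#define D2(x)
	#endif
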
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index 694aa5b03505..d76a268a1973 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
@@ -46,10 +46,10 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
46 | /* align it */ | 46 | /* align it */ |
47 | minsize = PAD(minsize); | 47 | minsize = PAD(minsize); |
48 | 48 | ||
49 | D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize)); | 49 | jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); |
50 | mutex_lock(&c->alloc_sem); | 50 | mutex_lock(&c->alloc_sem); |
51 | 51 | ||
52 | D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n")); | 52 | jffs2_dbg(1, "%s(): alloc sem got\n", __func__); |
53 | 53 | ||
54 | spin_lock(&c->erase_completion_lock); | 54 | spin_lock(&c->erase_completion_lock); |
55 | 55 | ||
@@ -73,11 +73,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
73 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; | 73 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; |
74 | if (dirty < c->nospc_dirty_size) { | 74 | if (dirty < c->nospc_dirty_size) { |
75 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { | 75 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { |
76 | D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n")); | 76 | jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n", |
77 | __func__); | ||
77 | break; | 78 | break; |
78 | } | 79 | } |
79 | D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", | 80 | jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", |
80 | dirty, c->unchecked_size, c->sector_size)); | 81 | dirty, c->unchecked_size, |
82 | c->sector_size); | ||
81 | 83 | ||
82 | spin_unlock(&c->erase_completion_lock); | 84 | spin_unlock(&c->erase_completion_lock); |
83 | mutex_unlock(&c->alloc_sem); | 85 | mutex_unlock(&c->alloc_sem); |
@@ -96,12 +98,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
96 | avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; | 98 | avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; |
97 | if ( (avail / c->sector_size) <= blocksneeded) { | 99 | if ( (avail / c->sector_size) <= blocksneeded) { |
98 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { | 100 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { |
99 | D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n")); | 101 | jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n", |
102 | __func__); | ||
100 | break; | 103 | break; |
101 | } | 104 | } |
102 | 105 | ||
103 | D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", | 106 | jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", |
104 | avail, blocksneeded * c->sector_size)); | 107 | avail, blocksneeded * c->sector_size); |
105 | spin_unlock(&c->erase_completion_lock); | 108 | spin_unlock(&c->erase_completion_lock); |
106 | mutex_unlock(&c->alloc_sem); | 109 | mutex_unlock(&c->alloc_sem); |
107 | return -ENOSPC; | 110 | return -ENOSPC; |
@@ -109,9 +112,14 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
109 | 112 | ||
110 | mutex_unlock(&c->alloc_sem); | 113 | mutex_unlock(&c->alloc_sem); |
111 | 114 | ||
112 | D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", | 115 | jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", |
113 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, | 116 | c->nr_free_blocks, c->nr_erasing_blocks, |
114 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); | 117 | c->free_size, c->dirty_size, c->wasted_size, |
118 | c->used_size, c->erasing_size, c->bad_size, | ||
119 | c->free_size + c->dirty_size + | ||
120 | c->wasted_size + c->used_size + | ||
121 | c->erasing_size + c->bad_size, | ||
122 | c->flash_size); | ||
115 | spin_unlock(&c->erase_completion_lock); | 123 | spin_unlock(&c->erase_completion_lock); |
116 | 124 | ||
117 | ret = jffs2_garbage_collect_pass(c); | 125 | ret = jffs2_garbage_collect_pass(c); |
@@ -124,7 +132,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
124 | DECLARE_WAITQUEUE(wait, current); | 132 | DECLARE_WAITQUEUE(wait, current); |
125 | set_current_state(TASK_UNINTERRUPTIBLE); | 133 | set_current_state(TASK_UNINTERRUPTIBLE); |
126 | add_wait_queue(&c->erase_wait, &wait); | 134 | add_wait_queue(&c->erase_wait, &wait); |
127 | D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__)); | 135 | jffs2_dbg(1, "%s waiting for erase to complete\n", |
136 | __func__); | ||
128 | spin_unlock(&c->erase_completion_lock); | 137 | spin_unlock(&c->erase_completion_lock); |
129 | 138 | ||
130 | schedule(); | 139 | schedule(); |
@@ -144,7 +153,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
144 | 153 | ||
145 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); | 154 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
146 | if (ret) { | 155 | if (ret) { |
147 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | 156 | jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret); |
148 | } | 157 | } |
149 | } | 158 | } |
150 | spin_unlock(&c->erase_completion_lock); | 159 | spin_unlock(&c->erase_completion_lock); |
@@ -161,13 +170,14 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, | |||
161 | int ret = -EAGAIN; | 170 | int ret = -EAGAIN; |
162 | minsize = PAD(minsize); | 171 | minsize = PAD(minsize); |
163 | 172 | ||
164 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize)); | 173 | jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); |
165 | 174 | ||
166 | spin_lock(&c->erase_completion_lock); | 175 | spin_lock(&c->erase_completion_lock); |
167 | while(ret == -EAGAIN) { | 176 | while(ret == -EAGAIN) { |
168 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); | 177 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
169 | if (ret) { | 178 | if (ret) { |
170 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | 179 | jffs2_dbg(1, "%s(): looping, ret is %d\n", |
180 | __func__, ret); | ||
171 | } | 181 | } |
172 | } | 182 | } |
173 | spin_unlock(&c->erase_completion_lock); | 183 | spin_unlock(&c->erase_completion_lock); |
@@ -184,8 +194,8 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
184 | { | 194 | { |
185 | 195 | ||
186 | if (c->nextblock == NULL) { | 196 | if (c->nextblock == NULL) { |
187 | D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n", | 197 | jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n", |
188 | jeb->offset)); | 198 | __func__, jeb->offset); |
189 | return; | 199 | return; |
190 | } | 200 | } |
191 | /* Check, if we have a dirty block now, or if it was dirty already */ | 201 | /* Check, if we have a dirty block now, or if it was dirty already */ |
@@ -195,17 +205,20 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
195 | jeb->dirty_size += jeb->wasted_size; | 205 | jeb->dirty_size += jeb->wasted_size; |
196 | jeb->wasted_size = 0; | 206 | jeb->wasted_size = 0; |
197 | if (VERYDIRTY(c, jeb->dirty_size)) { | 207 | if (VERYDIRTY(c, jeb->dirty_size)) { |
198 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 208 | jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
199 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 209 | jeb->offset, jeb->free_size, jeb->dirty_size, |
210 | jeb->used_size); | ||
200 | list_add_tail(&jeb->list, &c->very_dirty_list); | 211 | list_add_tail(&jeb->list, &c->very_dirty_list); |
201 | } else { | 212 | } else { |
202 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 213 | jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
203 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 214 | jeb->offset, jeb->free_size, jeb->dirty_size, |
215 | jeb->used_size); | ||
204 | list_add_tail(&jeb->list, &c->dirty_list); | 216 | list_add_tail(&jeb->list, &c->dirty_list); |
205 | } | 217 | } |
206 | } else { | 218 | } else { |
207 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 219 | jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
208 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 220 | jeb->offset, jeb->free_size, jeb->dirty_size, |
221 | jeb->used_size); | ||
209 | list_add_tail(&jeb->list, &c->clean_list); | 222 | list_add_tail(&jeb->list, &c->clean_list); |
210 | } | 223 | } |
211 | c->nextblock = NULL; | 224 | c->nextblock = NULL; |
@@ -230,13 +243,14 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c) | |||
230 | list_move_tail(&ejeb->list, &c->erase_pending_list); | 243 | list_move_tail(&ejeb->list, &c->erase_pending_list); |
231 | c->nr_erasing_blocks++; | 244 | c->nr_erasing_blocks++; |
232 | jffs2_garbage_collect_trigger(c); | 245 | jffs2_garbage_collect_trigger(c); |
233 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", | 246 | jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n", |
234 | ejeb->offset)); | 247 | __func__, ejeb->offset); |
235 | } | 248 | } |
236 | 249 | ||
237 | if (!c->nr_erasing_blocks && | 250 | if (!c->nr_erasing_blocks && |
238 | !list_empty(&c->erasable_pending_wbuf_list)) { | 251 | !list_empty(&c->erasable_pending_wbuf_list)) { |
239 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n")); | 252 | jffs2_dbg(1, "%s(): Flushing write buffer\n", |
253 | __func__); | ||
240 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | 254 | /* c->nextblock is NULL, no update to c->nextblock allowed */ |
241 | spin_unlock(&c->erase_completion_lock); | 255 | spin_unlock(&c->erase_completion_lock); |
242 | jffs2_flush_wbuf_pad(c); | 256 | jffs2_flush_wbuf_pad(c); |
@@ -278,7 +292,8 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c) | |||
278 | c->wbuf_ofs = 0xffffffff; | 292 | c->wbuf_ofs = 0xffffffff; |
279 | #endif | 293 | #endif |
280 | 294 | ||
281 | D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); | 295 | jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", |
296 | __func__, c->nextblock->offset); | ||
282 | 297 | ||
283 | return 0; | 298 | return 0; |
284 | } | 299 | } |
@@ -345,7 +360,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
345 | 360 | ||
346 | if (jffs2_wbuf_dirty(c)) { | 361 | if (jffs2_wbuf_dirty(c)) { |
347 | spin_unlock(&c->erase_completion_lock); | 362 | spin_unlock(&c->erase_completion_lock); |
348 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | 363 | jffs2_dbg(1, "%s(): Flushing write buffer\n", |
364 | __func__); | ||
349 | jffs2_flush_wbuf_pad(c); | 365 | jffs2_flush_wbuf_pad(c); |
350 | spin_lock(&c->erase_completion_lock); | 366 | spin_lock(&c->erase_completion_lock); |
351 | jeb = c->nextblock; | 367 | jeb = c->nextblock; |
@@ -408,8 +424,9 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
408 | spin_lock(&c->erase_completion_lock); | 424 | spin_lock(&c->erase_completion_lock); |
409 | } | 425 | } |
410 | 426 | ||
411 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", | 427 | jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n", |
412 | *len, jeb->offset + (c->sector_size - jeb->free_size))); | 428 | __func__, |
429 | *len, jeb->offset + (c->sector_size - jeb->free_size)); | ||
413 | return 0; | 430 | return 0; |
414 | } | 431 | } |
415 | 432 | ||
@@ -434,8 +451,8 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, | |||
434 | 451 | ||
435 | jeb = &c->blocks[ofs / c->sector_size]; | 452 | jeb = &c->blocks[ofs / c->sector_size]; |
436 | 453 | ||
437 | D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", | 454 | jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n", |
438 | ofs & ~3, ofs & 3, len)); | 455 | __func__, ofs & ~3, ofs & 3, len); |
439 | #if 1 | 456 | #if 1 |
440 | /* Allow non-obsolete nodes only to be added at the end of c->nextblock, | 457 | /* Allow non-obsolete nodes only to be added at the end of c->nextblock, |
441 | if c->nextblock is set. Note that wbuf.c will file obsolete nodes | 458 | if c->nextblock is set. Note that wbuf.c will file obsolete nodes |
@@ -457,8 +474,9 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, | |||
457 | 474 | ||
458 | if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { | 475 | if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { |
459 | /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ | 476 | /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ |
460 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 477 | jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
461 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 478 | jeb->offset, jeb->free_size, jeb->dirty_size, |
479 | jeb->used_size); | ||
462 | if (jffs2_wbuf_dirty(c)) { | 480 | if (jffs2_wbuf_dirty(c)) { |
463 | /* Flush the last write in the block if it's outstanding */ | 481 | /* Flush the last write in the block if it's outstanding */ |
464 | spin_unlock(&c->erase_completion_lock); | 482 | spin_unlock(&c->erase_completion_lock); |
@@ -480,7 +498,7 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, | |||
480 | 498 | ||
481 | void jffs2_complete_reservation(struct jffs2_sb_info *c) | 499 | void jffs2_complete_reservation(struct jffs2_sb_info *c) |
482 | { | 500 | { |
483 | D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n")); | 501 | jffs2_dbg(1, "jffs2_complete_reservation()\n"); |
484 | spin_lock(&c->erase_completion_lock); | 502 | spin_lock(&c->erase_completion_lock); |
485 | jffs2_garbage_collect_trigger(c); | 503 | jffs2_garbage_collect_trigger(c); |
486 | spin_unlock(&c->erase_completion_lock); | 504 | spin_unlock(&c->erase_completion_lock); |
@@ -493,7 +511,7 @@ static inline int on_list(struct list_head *obj, struct list_head *head) | |||
493 | 511 | ||
494 | list_for_each(this, head) { | 512 | list_for_each(this, head) { |
495 | if (this == obj) { | 513 | if (this == obj) { |
496 | D1(printk("%p is on list at %p\n", obj, head)); | 514 | jffs2_dbg(1, "%p is on list at %p\n", obj, head); |
497 | return 1; | 515 | return 1; |
498 | 516 | ||
499 | } | 517 | } |
@@ -515,7 +533,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
515 | return; | 533 | return; |
516 | } | 534 | } |
517 | if (ref_obsolete(ref)) { | 535 | if (ref_obsolete(ref)) { |
518 | D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref))); | 536 | jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n", |
537 | __func__, ref_offset(ref)); | ||
519 | return; | 538 | return; |
520 | } | 539 | } |
521 | blocknr = ref->flash_offset / c->sector_size; | 540 | blocknr = ref->flash_offset / c->sector_size; |
@@ -546,7 +565,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
546 | freed_len, blocknr, ref->flash_offset, jeb->used_size); | 565 | freed_len, blocknr, ref->flash_offset, jeb->used_size); |
547 | BUG(); | 566 | BUG(); |
548 | }) | 567 | }) |
549 | D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len)); | 568 | jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n", |
569 | ref_offset(ref), freed_len); | ||
550 | jeb->unchecked_size -= freed_len; | 570 | jeb->unchecked_size -= freed_len; |
551 | c->unchecked_size -= freed_len; | 571 | c->unchecked_size -= freed_len; |
552 | } else { | 572 | } else { |
@@ -555,14 +575,15 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
555 | freed_len, blocknr, ref->flash_offset, jeb->used_size); | 575 | freed_len, blocknr, ref->flash_offset, jeb->used_size); |
556 | BUG(); | 576 | BUG(); |
557 | }) | 577 | }) |
558 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len)); | 578 | jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ", |
579 | ref_offset(ref), freed_len); | ||
559 | jeb->used_size -= freed_len; | 580 | jeb->used_size -= freed_len; |
560 | c->used_size -= freed_len; | 581 | c->used_size -= freed_len; |
561 | } | 582 | } |
562 | 583 | ||
563 | // Take care, that wasted size is taken into concern | 584 | // Take care, that wasted size is taken into concern |
564 | if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { | 585 | if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { |
565 | D1(printk("Dirtying\n")); | 586 | jffs2_dbg(1, "Dirtying\n"); |
566 | addedsize = freed_len; | 587 | addedsize = freed_len; |
567 | jeb->dirty_size += freed_len; | 588 | jeb->dirty_size += freed_len; |
568 | c->dirty_size += freed_len; | 589 | c->dirty_size += freed_len; |
@@ -570,12 +591,12 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
570 | /* Convert wasted space to dirty, if not a bad block */ | 591 | /* Convert wasted space to dirty, if not a bad block */ |
571 | if (jeb->wasted_size) { | 592 | if (jeb->wasted_size) { |
572 | if (on_list(&jeb->list, &c->bad_used_list)) { | 593 | if (on_list(&jeb->list, &c->bad_used_list)) { |
573 | D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n", | 594 | jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n", |
574 | jeb->offset)); | 595 | jeb->offset); |
575 | addedsize = 0; /* To fool the refiling code later */ | 596 | addedsize = 0; /* To fool the refiling code later */ |
576 | } else { | 597 | } else { |
577 | D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n", | 598 | jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n", |
578 | jeb->wasted_size, jeb->offset)); | 599 | jeb->wasted_size, jeb->offset); |
579 | addedsize += jeb->wasted_size; | 600 | addedsize += jeb->wasted_size; |
580 | jeb->dirty_size += jeb->wasted_size; | 601 | jeb->dirty_size += jeb->wasted_size; |
581 | c->dirty_size += jeb->wasted_size; | 602 | c->dirty_size += jeb->wasted_size; |
@@ -584,7 +605,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
584 | } | 605 | } |
585 | } | 606 | } |
586 | } else { | 607 | } else { |
587 | D1(printk("Wasting\n")); | 608 | jffs2_dbg(1, "Wasting\n"); |
588 | addedsize = 0; | 609 | addedsize = 0; |
589 | jeb->wasted_size += freed_len; | 610 | jeb->wasted_size += freed_len; |
590 | c->wasted_size += freed_len; | 611 | c->wasted_size += freed_len; |
@@ -606,50 +627,57 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
606 | } | 627 | } |
607 | 628 | ||
608 | if (jeb == c->nextblock) { | 629 | if (jeb == c->nextblock) { |
609 | D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset)); | 630 | jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n", |
631 | jeb->offset); | ||
610 | } else if (!jeb->used_size && !jeb->unchecked_size) { | 632 | } else if (!jeb->used_size && !jeb->unchecked_size) { |
611 | if (jeb == c->gcblock) { | 633 | if (jeb == c->gcblock) { |
612 | D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset)); | 634 | jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", |
635 | jeb->offset); | ||
613 | c->gcblock = NULL; | 636 | c->gcblock = NULL; |
614 | } else { | 637 | } else { |
615 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset)); | 638 | jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", |
639 | jeb->offset); | ||
616 | list_del(&jeb->list); | 640 | list_del(&jeb->list); |
617 | } | 641 | } |
618 | if (jffs2_wbuf_dirty(c)) { | 642 | if (jffs2_wbuf_dirty(c)) { |
619 | D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n")); | 643 | jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n"); |
620 | list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); | 644 | list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); |
621 | } else { | 645 | } else { |
622 | if (jiffies & 127) { | 646 | if (jiffies & 127) { |
623 | /* Most of the time, we just erase it immediately. Otherwise we | 647 | /* Most of the time, we just erase it immediately. Otherwise we |
624 | spend ages scanning it on mount, etc. */ | 648 | spend ages scanning it on mount, etc. */ |
625 | D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); | 649 | jffs2_dbg(1, "...and adding to erase_pending_list\n"); |
626 | list_add_tail(&jeb->list, &c->erase_pending_list); | 650 | list_add_tail(&jeb->list, &c->erase_pending_list); |
627 | c->nr_erasing_blocks++; | 651 | c->nr_erasing_blocks++; |
628 | jffs2_garbage_collect_trigger(c); | 652 | jffs2_garbage_collect_trigger(c); |
629 | } else { | 653 | } else { |
630 | /* Sometimes, however, we leave it elsewhere so it doesn't get | 654 | /* Sometimes, however, we leave it elsewhere so it doesn't get |
631 | immediately reused, and we spread the load a bit. */ | 655 | immediately reused, and we spread the load a bit. */ |
632 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 656 | jffs2_dbg(1, "...and adding to erasable_list\n"); |
633 | list_add_tail(&jeb->list, &c->erasable_list); | 657 | list_add_tail(&jeb->list, &c->erasable_list); |
634 | } | 658 | } |
635 | } | 659 | } |
636 | D1(printk(KERN_DEBUG "Done OK\n")); | 660 | jffs2_dbg(1, "Done OK\n"); |
637 | } else if (jeb == c->gcblock) { | 661 | } else if (jeb == c->gcblock) { |
638 | D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset)); | 662 | jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n", |
663 | jeb->offset); | ||
639 | } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { | 664 | } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { |
640 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset)); | 665 | jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", |
666 | jeb->offset); | ||
641 | list_del(&jeb->list); | 667 | list_del(&jeb->list); |
642 | D1(printk(KERN_DEBUG "...and adding to dirty_list\n")); | 668 | jffs2_dbg(1, "...and adding to dirty_list\n"); |
643 | list_add_tail(&jeb->list, &c->dirty_list); | 669 | list_add_tail(&jeb->list, &c->dirty_list); |
644 | } else if (VERYDIRTY(c, jeb->dirty_size) && | 670 | } else if (VERYDIRTY(c, jeb->dirty_size) && |
645 | !VERYDIRTY(c, jeb->dirty_size - addedsize)) { | 671 | !VERYDIRTY(c, jeb->dirty_size - addedsize)) { |
646 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset)); | 672 | jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", |
673 | jeb->offset); | ||
647 | list_del(&jeb->list); | 674 | list_del(&jeb->list); |
648 | D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n")); | 675 | jffs2_dbg(1, "...and adding to very_dirty_list\n"); |
649 | list_add_tail(&jeb->list, &c->very_dirty_list); | 676 | list_add_tail(&jeb->list, &c->very_dirty_list); |
650 | } else { | 677 | } else { |
651 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", | 678 | jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", |
652 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 679 | jeb->offset, jeb->free_size, jeb->dirty_size, |
680 | jeb->used_size); | ||
653 | } | 681 | } |
654 | 682 | ||
655 | spin_unlock(&c->erase_completion_lock); | 683 | spin_unlock(&c->erase_completion_lock); |
@@ -665,7 +693,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
665 | the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet | 693 | the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet |
666 | by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ | 694 | by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ |
667 | 695 | ||
668 | D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); | 696 | jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n", |
697 | ref_offset(ref)); | ||
669 | ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); | 698 | ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); |
670 | if (ret) { | 699 | if (ret) { |
671 | printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); | 700 | printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); |
@@ -680,7 +709,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
680 | goto out_erase_sem; | 709 | goto out_erase_sem; |
681 | } | 710 | } |
682 | if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { | 711 | if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { |
683 | D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype))); | 712 | jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", |
713 | ref_offset(ref), je16_to_cpu(n.nodetype)); | ||
684 | goto out_erase_sem; | 714 | goto out_erase_sem; |
685 | } | 715 | } |
686 | /* XXX FIXME: This is ugly now */ | 716 | /* XXX FIXME: This is ugly now */ |
@@ -751,8 +781,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
751 | return 1; | 781 | return 1; |
752 | 782 | ||
753 | if (c->unchecked_size) { | 783 | if (c->unchecked_size) { |
754 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", | 784 | jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", |
755 | c->unchecked_size, c->checked_ino)); | 785 | c->unchecked_size, c->checked_ino); |
756 | return 1; | 786 | return 1; |
757 | } | 787 | } |
758 | 788 | ||
@@ -780,8 +810,9 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
780 | } | 810 | } |
781 | } | 811 | } |
782 | 812 | ||
783 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", | 813 | jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", |
784 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no")); | 814 | __func__, c->nr_free_blocks, c->nr_erasing_blocks, |
815 | c->dirty_size, nr_very_dirty, ret ? "yes" : "no"); | ||
785 | 816 | ||
786 | return ret; | 817 | return ret; |
787 | } | 818 | } |
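Editorial note on the nodemgmt.c hunks above: they show the printf-style replacement in bulk. The jffs2_dbg() macro itself is declared in fs/jffs2/debug.h, which is not shown in this part of the listing; a minimal sketch of a level-gated wrapper with this calling convention, assuming it keys off the same CONFIG_JFFS2_FS_DEBUG value the D1/D2 wrappers used, would be:

	/* Minimal sketch only; the actual macro lives in fs/jffs2/debug.h. */
	#define jffs2_dbg(level, fmt, ...)				\
	do {								\
		if (CONFIG_JFFS2_FS_DEBUG >= (level))			\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)
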
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c index 3f39be1b0455..835dc5d28055 100644 --- a/fs/jffs2/read.c +++ b/fs/jffs2/read.c | |||
@@ -47,10 +47,10 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
47 | } | 47 | } |
48 | crc = crc32(0, ri, sizeof(*ri)-8); | 48 | crc = crc32(0, ri, sizeof(*ri)-8); |
49 | 49 | ||
50 | D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", | 50 | jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", |
51 | ref_offset(fd->raw), je32_to_cpu(ri->node_crc), | 51 | ref_offset(fd->raw), je32_to_cpu(ri->node_crc), |
52 | crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), | 52 | crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), |
53 | je32_to_cpu(ri->offset), buf)); | 53 | je32_to_cpu(ri->offset), buf); |
54 | if (crc != je32_to_cpu(ri->node_crc)) { | 54 | if (crc != je32_to_cpu(ri->node_crc)) { |
55 | printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", | 55 | printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", |
56 | je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); | 56 | je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); |
@@ -107,8 +107,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
107 | decomprbuf = readbuf; | 107 | decomprbuf = readbuf; |
108 | } | 108 | } |
109 | 109 | ||
110 | D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize), | 110 | jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize), |
111 | readbuf)); | 111 | readbuf); |
112 | ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), | 112 | ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), |
113 | je32_to_cpu(ri->csize), &readlen, readbuf); | 113 | je32_to_cpu(ri->csize), &readlen, readbuf); |
114 | 114 | ||
@@ -124,10 +124,11 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
124 | ret = -EIO; | 124 | ret = -EIO; |
125 | goto out_decomprbuf; | 125 | goto out_decomprbuf; |
126 | } | 126 | } |
127 | D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); | 127 | jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc); |
128 | if (ri->compr != JFFS2_COMPR_NONE) { | 128 | if (ri->compr != JFFS2_COMPR_NONE) { |
129 | D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", | 129 | jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n", |
130 | je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); | 130 | je32_to_cpu(ri->csize), readbuf, |
131 | je32_to_cpu(ri->dsize), decomprbuf); | ||
131 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); | 132 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); |
132 | if (ret) { | 133 | if (ret) { |
133 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); | 134 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); |
@@ -157,8 +158,8 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
157 | struct jffs2_node_frag *frag; | 158 | struct jffs2_node_frag *frag; |
158 | int ret; | 159 | int ret; |
159 | 160 | ||
160 | D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n", | 161 | jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n", |
161 | f->inocache->ino, offset, offset+len)); | 162 | __func__, f->inocache->ino, offset, offset + len); |
162 | 163 | ||
163 | frag = jffs2_lookup_node_frag(&f->fragtree, offset); | 164 | frag = jffs2_lookup_node_frag(&f->fragtree, offset); |
164 | 165 | ||
@@ -168,22 +169,27 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
168 | * (or perhaps is before it, if we've been asked to read off the | 169 | * (or perhaps is before it, if we've been asked to read off the |
169 | * end of the file). */ | 170 | * end of the file). */ |
170 | while(offset < end) { | 171 | while(offset < end) { |
171 | D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); | 172 | jffs2_dbg(2, "%s(): offset %d, end %d\n", |
173 | __func__, offset, end); | ||
172 | if (unlikely(!frag || frag->ofs > offset || | 174 | if (unlikely(!frag || frag->ofs > offset || |
173 | frag->ofs + frag->size <= offset)) { | 175 | frag->ofs + frag->size <= offset)) { |
174 | uint32_t holesize = end - offset; | 176 | uint32_t holesize = end - offset; |
175 | if (frag && frag->ofs > offset) { | 177 | if (frag && frag->ofs > offset) { |
176 | D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); | 178 | jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", |
179 | f->inocache->ino, frag->ofs, offset); | ||
177 | holesize = min(holesize, frag->ofs - offset); | 180 | holesize = min(holesize, frag->ofs - offset); |
178 | } | 181 | } |
179 | D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); | 182 | jffs2_dbg(1, "Filling non-frag hole from %d-%d\n", |
183 | offset, offset + holesize); | ||
180 | memset(buf, 0, holesize); | 184 | memset(buf, 0, holesize); |
181 | buf += holesize; | 185 | buf += holesize; |
182 | offset += holesize; | 186 | offset += holesize; |
183 | continue; | 187 | continue; |
184 | } else if (unlikely(!frag->node)) { | 188 | } else if (unlikely(!frag->node)) { |
185 | uint32_t holeend = min(end, frag->ofs + frag->size); | 189 | uint32_t holeend = min(end, frag->ofs + frag->size); |
186 | D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size)); | 190 | jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", |
191 | offset, holeend, frag->ofs, | ||
192 | frag->ofs + frag->size); | ||
187 | memset(buf, 0, holeend - offset); | 193 | memset(buf, 0, holeend - offset); |
188 | buf += holeend - offset; | 194 | buf += holeend - offset; |
189 | offset = holeend; | 195 | offset = holeend; |
@@ -195,20 +201,23 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
195 | 201 | ||
196 | fragofs = offset - frag->ofs; | 202 | fragofs = offset - frag->ofs; |
197 | readlen = min(frag->size - fragofs, end - offset); | 203 | readlen = min(frag->size - fragofs, end - offset); |
198 | D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", | 204 | jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n", |
199 | frag->ofs+fragofs, frag->ofs+fragofs+readlen, | 205 | frag->ofs+fragofs, |
200 | ref_offset(frag->node->raw), ref_flags(frag->node->raw))); | 206 | frag->ofs + fragofs+readlen, |
207 | ref_offset(frag->node->raw), | ||
208 | ref_flags(frag->node->raw)); | ||
201 | ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); | 209 | ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); |
202 | D2(printk(KERN_DEBUG "node read done\n")); | 210 | jffs2_dbg(2, "node read done\n"); |
203 | if (ret) { | 211 | if (ret) { |
204 | D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret)); | 212 | jffs2_dbg(1, "%s(): error %d\n", |
213 | __func__, ret); | ||
205 | memset(buf, 0, readlen); | 214 | memset(buf, 0, readlen); |
206 | return ret; | 215 | return ret; |
207 | } | 216 | } |
208 | buf += readlen; | 217 | buf += readlen; |
209 | offset += readlen; | 218 | offset += readlen; |
210 | frag = frag_next(frag); | 219 | frag = frag_next(frag); |
211 | D2(printk(KERN_DEBUG "node read was OK. Looping\n")); | 220 | jffs2_dbg(2, "node read was OK. Looping\n"); |
212 | } | 221 | } |
213 | } | 222 | } |
214 | return 0; | 223 | return 0; |
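Editorial note on the read.c hunks above: alongside the macro swap, format strings that hard-coded a function name are rewritten as "%s(): " with __func__ passed as the first argument, so the message cannot go stale if the function is renamed. Taking the first jffs2_read_inode_range() message above as the model:

	/* Before: function name baked into the string */
	D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
		  f->inocache->ino, offset, offset+len));

	/* After: __func__ supplies the current name at compile time */
	jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n",
		  __func__, f->inocache->ino, offset, offset + len);
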
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index a06d47a688c7..b6c3d883b7d7 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -100,12 +100,13 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
100 | (void **)&flashbuf, NULL); | 100 | (void **)&flashbuf, NULL); |
101 | if (!ret && pointlen < c->mtd->size) { | 101 | if (!ret && pointlen < c->mtd->size) { |
102 | /* Don't muck about if it won't let us point to the whole flash */ | 102 | /* Don't muck about if it won't let us point to the whole flash */ |
103 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen)); | 103 | jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", |
104 | pointlen); | ||
104 | mtd_unpoint(c->mtd, 0, pointlen); | 105 | mtd_unpoint(c->mtd, 0, pointlen); |
105 | flashbuf = NULL; | 106 | flashbuf = NULL; |
106 | } | 107 | } |
107 | if (ret && ret != -EOPNOTSUPP) | 108 | if (ret && ret != -EOPNOTSUPP) |
108 | D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); | 109 | jffs2_dbg(1, "MTD point failed %d\n", ret); |
109 | #endif | 110 | #endif |
110 | if (!flashbuf) { | 111 | if (!flashbuf) { |
111 | /* For NAND it's quicker to read a whole eraseblock at a time, | 112 | /* For NAND it's quicker to read a whole eraseblock at a time, |
@@ -115,15 +116,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
115 | else | 116 | else |
116 | try_size = PAGE_SIZE; | 117 | try_size = PAGE_SIZE; |
117 | 118 | ||
118 | D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu " | 119 | jffs2_dbg(1, "Trying to allocate readbuf of %zu " |
119 | "bytes\n", try_size)); | 120 | "bytes\n", try_size); |
120 | 121 | ||
121 | flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); | 122 | flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); |
122 | if (!flashbuf) | 123 | if (!flashbuf) |
123 | return -ENOMEM; | 124 | return -ENOMEM; |
124 | 125 | ||
125 | D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n", | 126 | jffs2_dbg(1, "Allocated readbuf of %zu bytes\n", |
126 | try_size)); | 127 | try_size); |
127 | 128 | ||
128 | buf_size = (uint32_t)try_size; | 129 | buf_size = (uint32_t)try_size; |
129 | } | 130 | } |
@@ -176,7 +177,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
176 | c->nr_free_blocks++; | 177 | c->nr_free_blocks++; |
177 | } else { | 178 | } else { |
178 | /* Dirt */ | 179 | /* Dirt */ |
179 | D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset)); | 180 | jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n", |
181 | jeb->offset); | ||
180 | list_add(&jeb->list, &c->erase_pending_list); | 182 | list_add(&jeb->list, &c->erase_pending_list); |
181 | c->nr_erasing_blocks++; | 183 | c->nr_erasing_blocks++; |
182 | } | 184 | } |
@@ -203,7 +205,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
203 | } | 205 | } |
204 | /* update collected summary information for the current nextblock */ | 206 | /* update collected summary information for the current nextblock */ |
205 | jffs2_sum_move_collected(c, s); | 207 | jffs2_sum_move_collected(c, s); |
206 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); | 208 | jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", |
209 | __func__, jeb->offset); | ||
207 | c->nextblock = jeb; | 210 | c->nextblock = jeb; |
208 | } else { | 211 | } else { |
209 | ret = file_dirty(c, jeb); | 212 | ret = file_dirty(c, jeb); |
@@ -215,13 +218,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
215 | case BLK_STATE_ALLDIRTY: | 218 | case BLK_STATE_ALLDIRTY: |
216 | /* Nothing valid - not even a clean marker. Needs erasing. */ | 219 | /* Nothing valid - not even a clean marker. Needs erasing. */ |
217 | /* For now we just put it on the erasing list. We'll start the erases later */ | 220 | /* For now we just put it on the erasing list. We'll start the erases later */ |
218 | D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); | 221 | jffs2_dbg(1, "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", |
222 | jeb->offset); | ||
219 | list_add(&jeb->list, &c->erase_pending_list); | 223 | list_add(&jeb->list, &c->erase_pending_list); |
220 | c->nr_erasing_blocks++; | 224 | c->nr_erasing_blocks++; |
221 | break; | 225 | break; |
222 | 226 | ||
223 | case BLK_STATE_BADBLOCK: | 227 | case BLK_STATE_BADBLOCK: |
224 | D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); | 228 | jffs2_dbg(1, "JFFS2: Block at 0x%08x is bad\n", |
229 | jeb->offset); | ||
225 | list_add(&jeb->list, &c->bad_list); | 230 | list_add(&jeb->list, &c->bad_list); |
226 | c->bad_size += c->sector_size; | 231 | c->bad_size += c->sector_size; |
227 | c->free_size -= c->sector_size; | 232 | c->free_size -= c->sector_size; |
@@ -248,8 +253,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
248 | 253 | ||
249 | uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; | 254 | uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; |
250 | 255 | ||
251 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", | 256 | jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n", |
252 | skip)); | 257 | __func__, skip); |
253 | jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); | 258 | jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); |
254 | jffs2_scan_dirty_space(c, c->nextblock, skip); | 259 | jffs2_scan_dirty_space(c, c->nextblock, skip); |
255 | } | 260 | } |
@@ -285,11 +290,13 @@ static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, | |||
285 | 290 | ||
286 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); | 291 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); |
287 | if (ret) { | 292 | if (ret) { |
288 | D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret)); | 293 | jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n", |
294 | len, ofs, ret); | ||
289 | return ret; | 295 | return ret; |
290 | } | 296 | } |
291 | if (retlen < len) { | 297 | if (retlen < len) { |
292 | D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen)); | 298 | jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n", |
299 | ofs, retlen); | ||
293 | return -EIO; | 300 | return -EIO; |
294 | } | 301 | } |
295 | return 0; | 302 | return 0; |
@@ -447,7 +454,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
447 | ofs = jeb->offset; | 454 | ofs = jeb->offset; |
448 | prevofs = jeb->offset - 1; | 455 | prevofs = jeb->offset - 1; |
449 | 456 | ||
450 | D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs)); | 457 | jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs); |
451 | 458 | ||
452 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 459 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
453 | if (jffs2_cleanmarker_oob(c)) { | 460 | if (jffs2_cleanmarker_oob(c)) { |
@@ -457,7 +464,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
457 | return BLK_STATE_BADBLOCK; | 464 | return BLK_STATE_BADBLOCK; |
458 | 465 | ||
459 | ret = jffs2_check_nand_cleanmarker(c, jeb); | 466 | ret = jffs2_check_nand_cleanmarker(c, jeb); |
460 | D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret)); | 467 | jffs2_dbg(2, "jffs_check_nand_cleanmarker returned %d\n", ret); |
461 | 468 | ||
462 | /* Even if it's not found, we still scan to see | 469 | /* Even if it's not found, we still scan to see |
463 | if the block is empty. We use this information | 470 | if the block is empty. We use this information |
@@ -559,7 +566,8 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
559 | if (jffs2_cleanmarker_oob(c)) { | 566 | if (jffs2_cleanmarker_oob(c)) { |
560 | /* scan oob, take care of cleanmarker */ | 567 | /* scan oob, take care of cleanmarker */ |
561 | int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); | 568 | int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); |
562 | D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret)); | 569 | jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n", |
570 | ret); | ||
563 | switch (ret) { | 571 | switch (ret) { |
564 | case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; | 572 | case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; |
565 | case 1: return BLK_STATE_ALLDIRTY; | 573 | case 1: return BLK_STATE_ALLDIRTY; |
@@ -567,15 +575,16 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
567 | } | 575 | } |
568 | } | 576 | } |
569 | #endif | 577 | #endif |
570 | D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset)); | 578 | jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n", |
579 | jeb->offset); | ||
571 | if (c->cleanmarker_size == 0) | 580 | if (c->cleanmarker_size == 0) |
572 | return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ | 581 | return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ |
573 | else | 582 | else |
574 | return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ | 583 | return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ |
575 | } | 584 | } |
576 | if (ofs) { | 585 | if (ofs) { |
577 | D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, | 586 | jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset, |
578 | jeb->offset + ofs)); | 587 | jeb->offset + ofs); |
579 | if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) | 588 | if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) |
580 | return err; | 589 | return err; |
581 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) | 590 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) |
@@ -616,8 +625,10 @@ scan_more: | |||
616 | prevofs = ofs; | 625 | prevofs = ofs; |
617 | 626 | ||
618 | if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { | 627 | if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { |
619 | D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), | 628 | jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", |
620 | jeb->offset, c->sector_size, ofs, sizeof(*node))); | 629 | sizeof(struct jffs2_unknown_node), |
630 | jeb->offset, c->sector_size, ofs, | ||
631 | sizeof(*node)); | ||
621 | if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) | 632 | if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) |
622 | return err; | 633 | return err; |
623 | break; | 634 | break; |
@@ -625,8 +636,9 @@ scan_more: | |||
625 | 636 | ||
626 | if (buf_ofs + buf_len < ofs + sizeof(*node)) { | 637 | if (buf_ofs + buf_len < ofs + sizeof(*node)) { |
627 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 638 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
628 | D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", | 639 | jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", |
629 | sizeof(struct jffs2_unknown_node), buf_len, ofs)); | 640 | sizeof(struct jffs2_unknown_node), |
641 | buf_len, ofs); | ||
630 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 642 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
631 | if (err) | 643 | if (err) |
632 | return err; | 644 | return err; |
@@ -643,7 +655,7 @@ scan_more: | |||
643 | ofs += 4; | 655 | ofs += 4; |
644 | scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); | 656 | scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); |
645 | 657 | ||
646 | D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs)); | 658 | jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs); |
647 | more_empty: | 659 | more_empty: |
648 | inbuf_ofs = ofs - buf_ofs; | 660 | inbuf_ofs = ofs - buf_ofs; |
649 | while (inbuf_ofs < scan_end) { | 661 | while (inbuf_ofs < scan_end) { |
@@ -659,13 +671,15 @@ scan_more: | |||
659 | ofs += 4; | 671 | ofs += 4; |
660 | } | 672 | } |
661 | /* Ran off end. */ | 673 | /* Ran off end. */ |
662 | D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs)); | 674 | jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n", |
675 | ofs); | ||
663 | 676 | ||
664 | /* If we're only checking the beginning of a block with a cleanmarker, | 677 | /* If we're only checking the beginning of a block with a cleanmarker, |
665 | bail now */ | 678 | bail now */ |
666 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && | 679 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && |
667 | c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { | 680 | c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { |
668 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); | 681 | jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n", |
682 | EMPTY_SCAN_SIZE(c->sector_size)); | ||
669 | return BLK_STATE_CLEANMARKER; | 683 | return BLK_STATE_CLEANMARKER; |
670 | } | 684 | } |
671 | if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ | 685 | if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ |
@@ -678,13 +692,14 @@ scan_more: | |||
678 | if (!buf_len) { | 692 | if (!buf_len) { |
679 | /* No more to read. Break out of main loop without marking | 693 | /* No more to read. Break out of main loop without marking |
680 | this range of empty space as dirty (because it's not) */ | 694 | this range of empty space as dirty (because it's not) */ |
681 | D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", | 695 | jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n", |
682 | empty_start)); | 696 | empty_start); |
683 | break; | 697 | break; |
684 | } | 698 | } |
685 | /* point never reaches here */ | 699 | /* point never reaches here */ |
686 | scan_end = buf_len; | 700 | scan_end = buf_len; |
687 | D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs)); | 701 | jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n", |
702 | buf_len, ofs); | ||
688 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 703 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
689 | if (err) | 704 | if (err) |
690 | return err; | 705 | return err; |
@@ -700,7 +715,7 @@ scan_more: | |||
700 | continue; | 715 | continue; |
701 | } | 716 | } |
702 | if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { | 717 | if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { |
703 | D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); | 718 | jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs); |
704 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 719 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
705 | return err; | 720 | return err; |
706 | ofs += 4; | 721 | ofs += 4; |
@@ -756,7 +771,8 @@ scan_more: | |||
756 | 771 | ||
757 | if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { | 772 | if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { |
758 | /* Wheee. This is an obsoleted node */ | 773 | /* Wheee. This is an obsoleted node */ |
759 | D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); | 774 | jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n", |
775 | ofs); | ||
760 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) | 776 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
761 | return err; | 777 | return err; |
762 | ofs += PAD(je32_to_cpu(node->totlen)); | 778 | ofs += PAD(je32_to_cpu(node->totlen)); |
@@ -767,8 +783,9 @@ scan_more: | |||
767 | case JFFS2_NODETYPE_INODE: | 783 | case JFFS2_NODETYPE_INODE: |
768 | if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { | 784 | if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { |
769 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 785 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
770 | D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", | 786 | jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", |
771 | sizeof(struct jffs2_raw_inode), buf_len, ofs)); | 787 | sizeof(struct jffs2_raw_inode), |
788 | buf_len, ofs); | ||
772 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 789 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
773 | if (err) | 790 | if (err) |
774 | return err; | 791 | return err; |
@@ -783,8 +800,9 @@ scan_more: | |||
783 | case JFFS2_NODETYPE_DIRENT: | 800 | case JFFS2_NODETYPE_DIRENT: |
784 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 801 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
785 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 802 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
786 | D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", | 803 | jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", |
787 | je32_to_cpu(node->totlen), buf_len, ofs)); | 804 | je32_to_cpu(node->totlen), buf_len, |
805 | ofs); | ||
788 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 806 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
789 | if (err) | 807 | if (err) |
790 | return err; | 808 | return err; |
@@ -800,9 +818,9 @@ scan_more: | |||
800 | case JFFS2_NODETYPE_XATTR: | 818 | case JFFS2_NODETYPE_XATTR: |
801 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 819 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
802 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 820 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
803 | D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)" | 821 | jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n", |
804 | " left to end of buf. Reading 0x%x at 0x%08x\n", | 822 | je32_to_cpu(node->totlen), buf_len, |
805 | je32_to_cpu(node->totlen), buf_len, ofs)); | 823 | ofs); |
806 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 824 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
807 | if (err) | 825 | if (err) |
808 | return err; | 826 | return err; |
@@ -817,9 +835,9 @@ scan_more: | |||
817 | case JFFS2_NODETYPE_XREF: | 835 | case JFFS2_NODETYPE_XREF: |
818 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 836 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
819 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 837 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
820 | D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)" | 838 | jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n", |
821 | " left to end of buf. Reading 0x%x at 0x%08x\n", | 839 | je32_to_cpu(node->totlen), buf_len, |
822 | je32_to_cpu(node->totlen), buf_len, ofs)); | 840 | ofs); |
823 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 841 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
824 | if (err) | 842 | if (err) |
825 | return err; | 843 | return err; |
@@ -834,7 +852,7 @@ scan_more: | |||
834 | #endif /* CONFIG_JFFS2_FS_XATTR */ | 852 | #endif /* CONFIG_JFFS2_FS_XATTR */ |
835 | 853 | ||
836 | case JFFS2_NODETYPE_CLEANMARKER: | 854 | case JFFS2_NODETYPE_CLEANMARKER: |
837 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); | 855 | jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs); |
838 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { | 856 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { |
839 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", | 857 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", |
840 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); | 858 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); |
@@ -878,14 +896,16 @@ scan_more: | |||
878 | return -EINVAL; | 896 | return -EINVAL; |
879 | 897 | ||
880 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | 898 | case JFFS2_FEATURE_RWCOMPAT_DELETE: |
881 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); | 899 | jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", |
900 | je16_to_cpu(node->nodetype), ofs); | ||
882 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) | 901 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
883 | return err; | 902 | return err; |
884 | ofs += PAD(je32_to_cpu(node->totlen)); | 903 | ofs += PAD(je32_to_cpu(node->totlen)); |
885 | break; | 904 | break; |
886 | 905 | ||
887 | case JFFS2_FEATURE_RWCOMPAT_COPY: { | 906 | case JFFS2_FEATURE_RWCOMPAT_COPY: { |
888 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); | 907 | jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", |
908 | je16_to_cpu(node->nodetype), ofs); | ||
889 | 909 | ||
890 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); | 910 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); |
891 | 911 | ||
@@ -906,8 +926,9 @@ scan_more: | |||
906 | } | 926 | } |
907 | } | 927 | } |
908 | 928 | ||
909 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", | 929 | jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", |
910 | jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size)); | 930 | jeb->offset, jeb->free_size, jeb->dirty_size, |
931 | jeb->unchecked_size, jeb->used_size, jeb->wasted_size); | ||
911 | 932 | ||
912 | /* mark_node_obsolete can add to wasted !! */ | 933 | /* mark_node_obsolete can add to wasted !! */ |
913 | if (jeb->wasted_size) { | 934 | if (jeb->wasted_size) { |
@@ -952,7 +973,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
952 | struct jffs2_inode_cache *ic; | 973 | struct jffs2_inode_cache *ic; |
953 | uint32_t crc, ino = je32_to_cpu(ri->ino); | 974 | uint32_t crc, ino = je32_to_cpu(ri->ino); |
954 | 975 | ||
955 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); | 976 | jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); |
956 | 977 | ||
957 | /* We do very little here now. Just check the ino# to which we should attribute | 978 | /* We do very little here now. Just check the ino# to which we should attribute |
958 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- | 979 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- |
@@ -987,10 +1008,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
987 | /* Wheee. It worked */ | 1008 | /* Wheee. It worked */ |
988 | jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); | 1009 | jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); |
989 | 1010 | ||
990 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", | 1011 | jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n", |
991 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), | 1012 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), |
992 | je32_to_cpu(ri->offset), | 1013 | je32_to_cpu(ri->offset), |
993 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); | 1014 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)); |
994 | 1015 | ||
995 | pseudo_random += je32_to_cpu(ri->version); | 1016 | pseudo_random += je32_to_cpu(ri->version); |
996 | 1017 | ||
@@ -1010,7 +1031,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
1010 | uint32_t crc; | 1031 | uint32_t crc; |
1011 | int err; | 1032 | int err; |
1012 | 1033 | ||
1013 | D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); | 1034 | jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); |
1014 | 1035 | ||
1015 | /* We don't get here unless the node is still valid, so we don't have to | 1036 | /* We don't get here unless the node is still valid, so we don't have to |
1016 | mask in the ACCURATE bit any more. */ | 1037 | mask in the ACCURATE bit any more. */ |
@@ -1044,7 +1065,8 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
1044 | if (crc != je32_to_cpu(rd->name_crc)) { | 1065 | if (crc != je32_to_cpu(rd->name_crc)) { |
1045 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 1066 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
1046 | ofs, je32_to_cpu(rd->name_crc), crc); | 1067 | ofs, je32_to_cpu(rd->name_crc), crc); |
1047 | D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); | 1068 | jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n", |
1069 | fd->name, je32_to_cpu(rd->ino)); | ||
1048 | jffs2_free_full_dirent(fd); | 1070 | jffs2_free_full_dirent(fd); |
1049 | /* FIXME: Why do we believe totlen? */ | 1071 | /* FIXME: Why do we believe totlen? */ |
1050 | /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ | 1072 | /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ |
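The jffs2_dbg() calls introduced throughout these hunks come from a small helper added to fs/jffs2/debug.h by the same patch (that hunk is not shown in this excerpt). A minimal sketch of such a level-gated macro, assuming the existing CONFIG_JFFS2_FS_DEBUG verbosity setting, could look like this:

	/* Sketch only -- the actual definition lives in fs/jffs2/debug.h */
	#define jffs2_dbg(level, fmt, ...)				\
	do {								\
		if (CONFIG_JFFS2_FS_DEBUG >= level)			\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)

Under a definition of this shape, jffs2_dbg(1, ...) takes the place of the old D1() wrappers and jffs2_dbg(2, ...) of D2(), with output gated on the configured debug level.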
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index f2d96b5e64f6..3600e3e508e5 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -69,7 +69,7 @@ static void jffs2_write_super(struct super_block *sb) | |||
69 | sb->s_dirt = 0; | 69 | sb->s_dirt = 0; |
70 | 70 | ||
71 | if (!(sb->s_flags & MS_RDONLY)) { | 71 | if (!(sb->s_flags & MS_RDONLY)) { |
72 | D1(printk(KERN_DEBUG "jffs2_write_super()\n")); | 72 | jffs2_dbg(1, "%s()\n", __func__); |
73 | jffs2_flush_wbuf_gc(c, 0); | 73 | jffs2_flush_wbuf_gc(c, 0); |
74 | } | 74 | } |
75 | 75 | ||
@@ -266,9 +266,9 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent) | |||
266 | struct jffs2_sb_info *c; | 266 | struct jffs2_sb_info *c; |
267 | int ret; | 267 | int ret; |
268 | 268 | ||
269 | D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():" | 269 | jffs2_dbg(1, "jffs2_get_sb_mtd():" |
270 | " New superblock for device %d (\"%s\")\n", | 270 | " New superblock for device %d (\"%s\")\n", |
271 | sb->s_mtd->index, sb->s_mtd->name)); | 271 | sb->s_mtd->index, sb->s_mtd->name); |
272 | 272 | ||
273 | c = kzalloc(sizeof(*c), GFP_KERNEL); | 273 | c = kzalloc(sizeof(*c), GFP_KERNEL); |
274 | if (!c) | 274 | if (!c) |
@@ -315,7 +315,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
315 | { | 315 | { |
316 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | 316 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); |
317 | 317 | ||
318 | D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n")); | 318 | jffs2_dbg(2, "%s()\n", __func__); |
319 | 319 | ||
320 | if (sb->s_dirt) | 320 | if (sb->s_dirt) |
321 | jffs2_write_super(sb); | 321 | jffs2_write_super(sb); |
@@ -336,7 +336,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
336 | kfree(c->inocache_list); | 336 | kfree(c->inocache_list); |
337 | jffs2_clear_xattr_subsystem(c); | 337 | jffs2_clear_xattr_subsystem(c); |
338 | mtd_sync(c->mtd); | 338 | mtd_sync(c->mtd); |
339 | D1(printk(KERN_DEBUG "jffs2_put_super returning\n")); | 339 | jffs2_dbg(1, "%s(): returning\n", __func__); |
340 | } | 340 | } |
341 | 341 | ||
342 | static void jffs2_kill_sb(struct super_block *sb) | 342 | static void jffs2_kill_sb(struct super_block *sb) |
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index e3035afb1814..5188f4d39a5c 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c | |||
@@ -50,7 +50,8 @@ static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
50 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n"); | 50 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n"); |
51 | p = ERR_PTR(-EIO); | 51 | p = ERR_PTR(-EIO); |
52 | } | 52 | } |
53 | D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target)); | 53 | jffs2_dbg(1, "%s(): target path is '%s'\n", |
54 | __func__, (char *)f->target); | ||
54 | 55 | ||
55 | nd_set_link(nd, p); | 56 | nd_set_link(nd, p); |
56 | 57 | ||
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 30e8f47e8a23..d626eb2113e1 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
@@ -91,7 +91,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) | |||
91 | 91 | ||
92 | new = kmalloc(sizeof(*new), GFP_KERNEL); | 92 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
93 | if (!new) { | 93 | if (!new) { |
94 | D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n")); | 94 | jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n"); |
95 | jffs2_clear_wbuf_ino_list(c); | 95 | jffs2_clear_wbuf_ino_list(c); |
96 | c->wbuf_inodes = &inodirty_nomem; | 96 | c->wbuf_inodes = &inodirty_nomem; |
97 | return; | 97 | return; |
@@ -113,19 +113,20 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | |||
113 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { | 113 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { |
114 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 114 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
115 | 115 | ||
116 | D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset)); | 116 | jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", |
117 | jeb->offset); | ||
117 | list_del(this); | 118 | list_del(this); |
118 | if ((jiffies + (n++)) & 127) { | 119 | if ((jiffies + (n++)) & 127) { |
119 | /* Most of the time, we just erase it immediately. Otherwise we | 120 | /* Most of the time, we just erase it immediately. Otherwise we |
120 | spend ages scanning it on mount, etc. */ | 121 | spend ages scanning it on mount, etc. */ |
121 | D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); | 122 | jffs2_dbg(1, "...and adding to erase_pending_list\n"); |
122 | list_add_tail(&jeb->list, &c->erase_pending_list); | 123 | list_add_tail(&jeb->list, &c->erase_pending_list); |
123 | c->nr_erasing_blocks++; | 124 | c->nr_erasing_blocks++; |
124 | jffs2_garbage_collect_trigger(c); | 125 | jffs2_garbage_collect_trigger(c); |
125 | } else { | 126 | } else { |
126 | /* Sometimes, however, we leave it elsewhere so it doesn't get | 127 | /* Sometimes, however, we leave it elsewhere so it doesn't get |
127 | immediately reused, and we spread the load a bit. */ | 128 | immediately reused, and we spread the load a bit. */ |
128 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 129 | jffs2_dbg(1, "...and adding to erasable_list\n"); |
129 | list_add_tail(&jeb->list, &c->erasable_list); | 130 | list_add_tail(&jeb->list, &c->erasable_list); |
130 | } | 131 | } |
131 | } | 132 | } |
@@ -136,7 +137,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | |||
136 | 137 | ||
137 | static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) | 138 | static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) |
138 | { | 139 | { |
139 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); | 140 | jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset); |
140 | 141 | ||
141 | /* File the existing block on the bad_used_list.... */ | 142 | /* File the existing block on the bad_used_list.... */ |
142 | if (c->nextblock == jeb) | 143 | if (c->nextblock == jeb) |
@@ -144,12 +145,14 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
144 | else /* Not sure this should ever happen... need more coffee */ | 145 | else /* Not sure this should ever happen... need more coffee */ |
145 | list_del(&jeb->list); | 146 | list_del(&jeb->list); |
146 | if (jeb->first_node) { | 147 | if (jeb->first_node) { |
147 | D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset)); | 148 | jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n", |
149 | jeb->offset); | ||
148 | list_add(&jeb->list, &c->bad_used_list); | 150 | list_add(&jeb->list, &c->bad_used_list); |
149 | } else { | 151 | } else { |
150 | BUG_ON(allow_empty == REFILE_NOTEMPTY); | 152 | BUG_ON(allow_empty == REFILE_NOTEMPTY); |
151 | /* It has to have had some nodes or we couldn't be here */ | 153 | /* It has to have had some nodes or we couldn't be here */ |
152 | D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset)); | 154 | jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n", |
155 | jeb->offset); | ||
153 | list_add(&jeb->list, &c->erase_pending_list); | 156 | list_add(&jeb->list, &c->erase_pending_list); |
154 | c->nr_erasing_blocks++; | 157 | c->nr_erasing_blocks++; |
155 | jffs2_garbage_collect_trigger(c); | 158 | jffs2_garbage_collect_trigger(c); |
@@ -308,7 +311,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
308 | 311 | ||
309 | if (!first_raw) { | 312 | if (!first_raw) { |
310 | /* All nodes were obsolete. Nothing to recover. */ | 313 | /* All nodes were obsolete. Nothing to recover. */ |
311 | D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); | 314 | jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n"); |
312 | c->wbuf_len = 0; | 315 | c->wbuf_len = 0; |
313 | return; | 316 | return; |
314 | } | 317 | } |
@@ -406,8 +409,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
406 | unsigned char *rewrite_buf = buf?:c->wbuf; | 409 | unsigned char *rewrite_buf = buf?:c->wbuf; |
407 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); | 410 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); |
408 | 411 | ||
409 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", | 412 | jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n", |
410 | towrite, ofs)); | 413 | towrite, ofs); |
411 | 414 | ||
412 | #ifdef BREAKMEHEADER | 415 | #ifdef BREAKMEHEADER |
413 | static int breakme; | 416 | static int breakme; |
@@ -459,8 +462,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
459 | struct jffs2_raw_node_ref **adjust_ref = NULL; | 462 | struct jffs2_raw_node_ref **adjust_ref = NULL; |
460 | struct jffs2_inode_info *f = NULL; | 463 | struct jffs2_inode_info *f = NULL; |
461 | 464 | ||
462 | D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", | 465 | jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n", |
463 | rawlen, ref_offset(raw), ref_flags(raw), ofs)); | 466 | rawlen, ref_offset(raw), ref_flags(raw), ofs); |
464 | 467 | ||
465 | ic = jffs2_raw_ref_to_ic(raw); | 468 | ic = jffs2_raw_ref_to_ic(raw); |
466 | 469 | ||
@@ -540,7 +543,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
540 | 543 | ||
541 | /* Fix up the original jeb now it's on the bad_list */ | 544 | /* Fix up the original jeb now it's on the bad_list */ |
542 | if (first_raw == jeb->first_node) { | 545 | if (first_raw == jeb->first_node) { |
543 | D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); | 546 | jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n", |
547 | jeb->offset); | ||
544 | list_move(&jeb->list, &c->erase_pending_list); | 548 | list_move(&jeb->list, &c->erase_pending_list); |
545 | c->nr_erasing_blocks++; | 549 | c->nr_erasing_blocks++; |
546 | jffs2_garbage_collect_trigger(c); | 550 | jffs2_garbage_collect_trigger(c); |
@@ -554,7 +558,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
554 | 558 | ||
555 | spin_unlock(&c->erase_completion_lock); | 559 | spin_unlock(&c->erase_completion_lock); |
556 | 560 | ||
557 | D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len)); | 561 | jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", |
562 | c->wbuf_ofs, c->wbuf_len); | ||
558 | 563 | ||
559 | } | 564 | } |
560 | 565 | ||
@@ -647,8 +652,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
647 | if (pad) { | 652 | if (pad) { |
648 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; | 653 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; |
649 | 654 | ||
650 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | 655 | jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", |
651 | (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset)); | 656 | (wbuf_jeb == c->nextblock) ? "next" : "", |
657 | wbuf_jeb->offset); | ||
652 | 658 | ||
653 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | 659 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be |
654 | padded. If there is less free space in the block than that, | 660 | padded. If there is less free space in the block than that, |
@@ -694,14 +700,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
694 | uint32_t old_wbuf_len; | 700 | uint32_t old_wbuf_len; |
695 | int ret = 0; | 701 | int ret = 0; |
696 | 702 | ||
697 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino)); | 703 | jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino); |
698 | 704 | ||
699 | if (!c->wbuf) | 705 | if (!c->wbuf) |
700 | return 0; | 706 | return 0; |
701 | 707 | ||
702 | mutex_lock(&c->alloc_sem); | 708 | mutex_lock(&c->alloc_sem); |
703 | if (!jffs2_wbuf_pending_for_ino(c, ino)) { | 709 | if (!jffs2_wbuf_pending_for_ino(c, ino)) { |
704 | D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino)); | 710 | jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino); |
705 | mutex_unlock(&c->alloc_sem); | 711 | mutex_unlock(&c->alloc_sem); |
706 | return 0; | 712 | return 0; |
707 | } | 713 | } |
@@ -711,7 +717,8 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
711 | 717 | ||
712 | if (c->unchecked_size) { | 718 | if (c->unchecked_size) { |
713 | /* GC won't make any progress for a while */ | 719 | /* GC won't make any progress for a while */ |
714 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n")); | 720 | jffs2_dbg(1, "%s(): padding. Not finished checking\n", |
721 | __func__); | ||
715 | down_write(&c->wbuf_sem); | 722 | down_write(&c->wbuf_sem); |
716 | ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); | 723 | ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); |
717 | /* retry flushing wbuf in case jffs2_wbuf_recover | 724 | /* retry flushing wbuf in case jffs2_wbuf_recover |
@@ -724,7 +731,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
724 | 731 | ||
725 | mutex_unlock(&c->alloc_sem); | 732 | mutex_unlock(&c->alloc_sem); |
726 | 733 | ||
727 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n")); | 734 | jffs2_dbg(1, "%s(): calls gc pass\n", __func__); |
728 | 735 | ||
729 | ret = jffs2_garbage_collect_pass(c); | 736 | ret = jffs2_garbage_collect_pass(c); |
730 | if (ret) { | 737 | if (ret) { |
@@ -742,7 +749,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
742 | mutex_lock(&c->alloc_sem); | 749 | mutex_lock(&c->alloc_sem); |
743 | } | 750 | } |
744 | 751 | ||
745 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n")); | 752 | jffs2_dbg(1, "%s(): ends...\n", __func__); |
746 | 753 | ||
747 | mutex_unlock(&c->alloc_sem); | 754 | mutex_unlock(&c->alloc_sem); |
748 | return ret; | 755 | return ret; |
@@ -811,9 +818,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
811 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { | 818 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { |
812 | /* It's a write to a new block */ | 819 | /* It's a write to a new block */ |
813 | if (c->wbuf_len) { | 820 | if (c->wbuf_len) { |
814 | D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx " | 821 | jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n", |
815 | "causes flush of wbuf at 0x%08x\n", | 822 | __func__, (unsigned long)to, c->wbuf_ofs); |
816 | (unsigned long)to, c->wbuf_ofs)); | ||
817 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); | 823 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); |
818 | if (ret) | 824 | if (ret) |
819 | goto outerr; | 825 | goto outerr; |
@@ -825,8 +831,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
825 | 831 | ||
826 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | 832 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { |
827 | /* We're not writing immediately after the writebuffer. Bad. */ | 833 | /* We're not writing immediately after the writebuffer. Bad. */ |
828 | printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write " | 834 | printk(KERN_CRIT "%s(): Non-contiguous write to %08lx\n", |
829 | "to %08lx\n", (unsigned long)to); | 835 | __func__, (unsigned long)to); |
830 | if (c->wbuf_len) | 836 | if (c->wbuf_len) |
831 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", | 837 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", |
832 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); | 838 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); |
@@ -1048,8 +1054,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, | |||
1048 | continue; | 1054 | continue; |
1049 | 1055 | ||
1050 | if (ops.oobbuf[i] != 0xFF) { | 1056 | if (ops.oobbuf[i] != 0xFF) { |
1051 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for " | 1057 | jffs2_dbg(2, "Found %02x at %x in OOB for " |
1052 | "%08x\n", ops.oobbuf[i], i, jeb->offset)); | 1058 | "%08x\n", ops.oobbuf[i], i, jeb->offset); |
1053 | return 1; | 1059 | return 1; |
1054 | } | 1060 | } |
1055 | } | 1061 | } |
@@ -1134,7 +1140,8 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * | |||
1134 | ret = mtd_block_markbad(c->mtd, bad_offset); | 1140 | ret = mtd_block_markbad(c->mtd, bad_offset); |
1135 | 1141 | ||
1136 | if (ret) { | 1142 | if (ret) { |
1137 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1143 | jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n", |
1144 | __func__, jeb->offset, ret); | ||
1138 | return ret; | 1145 | return ret; |
1139 | } | 1146 | } |
1140 | return 1; | 1147 | return 1; |
@@ -1155,7 +1162,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
1155 | return -EINVAL; | 1162 | return -EINVAL; |
1156 | } | 1163 | } |
1157 | 1164 | ||
1158 | D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n")); | 1165 | jffs2_dbg(1, "JFFS2 using OOB on NAND\n"); |
1159 | 1166 | ||
1160 | c->oobavail = oinfo->oobavail; | 1167 | c->oobavail = oinfo->oobavail; |
1161 | 1168 | ||
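The write.c conversions that follow use the same two idioms seen above: the numeric level passed to jffs2_dbg() mirrors the old D1()/D2() wrapper, and where the old format string spelled out the function name it is typically replaced by a "%s" fed with __func__. A purely illustrative before/after pair (jffs2_foo and its arguments are hypothetical, not taken from this patch):

	/* old style: function name hard-coded in the string, printk wrapped in D1() */
	D1(printk(KERN_DEBUG "jffs2_foo(): wrote %d bytes at 0x%08x\n", len, ofs));

	/* new style: level 1 replaces D1(), the name is supplied by __func__ */
	jffs2_dbg(1, "%s(): wrote %d bytes at 0x%08x\n", __func__, len, ofs);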
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 30d175b6d290..e137839a157d 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
@@ -36,7 +36,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
36 | f->inocache->state = INO_STATE_PRESENT; | 36 | f->inocache->state = INO_STATE_PRESENT; |
37 | 37 | ||
38 | jffs2_add_ino_cache(c, f->inocache); | 38 | jffs2_add_ino_cache(c, f->inocache); |
39 | D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); | 39 | jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino); |
40 | ri->ino = cpu_to_je32(f->inocache->ino); | 40 | ri->ino = cpu_to_je32(f->inocache->ino); |
41 | 41 | ||
42 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 42 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
@@ -95,9 +95,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
95 | 95 | ||
96 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { | 96 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { |
97 | BUG_ON(!retried); | 97 | BUG_ON(!retried); |
98 | D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " | 98 | jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n", |
99 | "highest version %d -> updating dnode\n", | 99 | __func__, |
100 | je32_to_cpu(ri->version), f->highest_version)); | 100 | je32_to_cpu(ri->version), f->highest_version); |
101 | ri->version = cpu_to_je32(++f->highest_version); | 101 | ri->version = cpu_to_je32(++f->highest_version); |
102 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 102 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
103 | } | 103 | } |
@@ -127,7 +127,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
127 | 127 | ||
128 | retried = 1; | 128 | retried = 1; |
129 | 129 | ||
130 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 130 | jffs2_dbg(1, "Retrying failed write.\n"); |
131 | 131 | ||
132 | jffs2_dbg_acct_sanity_check(c,jeb); | 132 | jffs2_dbg_acct_sanity_check(c,jeb); |
133 | jffs2_dbg_acct_paranoia_check(c, jeb); | 133 | jffs2_dbg_acct_paranoia_check(c, jeb); |
@@ -147,14 +147,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
147 | 147 | ||
148 | if (!ret) { | 148 | if (!ret) { |
149 | flash_ofs = write_ofs(c); | 149 | flash_ofs = write_ofs(c); |
150 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 150 | jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", |
151 | flash_ofs); | ||
151 | 152 | ||
152 | jffs2_dbg_acct_sanity_check(c,jeb); | 153 | jffs2_dbg_acct_sanity_check(c,jeb); |
153 | jffs2_dbg_acct_paranoia_check(c, jeb); | 154 | jffs2_dbg_acct_paranoia_check(c, jeb); |
154 | 155 | ||
155 | goto retry; | 156 | goto retry; |
156 | } | 157 | } |
157 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 158 | jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", |
159 | ret); | ||
158 | } | 160 | } |
159 | /* Release the full_dnode which is now useless, and return */ | 161 | /* Release the full_dnode which is now useless, and return */ |
160 | jffs2_free_full_dnode(fn); | 162 | jffs2_free_full_dnode(fn); |
@@ -183,10 +185,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
183 | fn->size = je32_to_cpu(ri->dsize); | 185 | fn->size = je32_to_cpu(ri->dsize); |
184 | fn->frags = 0; | 186 | fn->frags = 0; |
185 | 187 | ||
186 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", | 188 | jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", |
187 | flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), | 189 | flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), |
188 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), | 190 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), |
189 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); | 191 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)); |
190 | 192 | ||
191 | if (retried) { | 193 | if (retried) { |
192 | jffs2_dbg_acct_sanity_check(c,NULL); | 194 | jffs2_dbg_acct_sanity_check(c,NULL); |
@@ -206,9 +208,10 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
206 | int retried = 0; | 208 | int retried = 0; |
207 | int ret; | 209 | int ret; |
208 | 210 | ||
209 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", | 211 | jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", |
212 | __func__, | ||
210 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), | 213 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), |
211 | je32_to_cpu(rd->name_crc))); | 214 | je32_to_cpu(rd->name_crc)); |
212 | 215 | ||
213 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { | 216 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { |
214 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); | 217 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); |
@@ -249,9 +252,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
249 | 252 | ||
250 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { | 253 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { |
251 | BUG_ON(!retried); | 254 | BUG_ON(!retried); |
252 | D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, " | 255 | jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n", |
253 | "highest version %d -> updating dirent\n", | 256 | __func__, |
254 | je32_to_cpu(rd->version), f->highest_version)); | 257 | je32_to_cpu(rd->version), f->highest_version); |
255 | rd->version = cpu_to_je32(++f->highest_version); | 258 | rd->version = cpu_to_je32(++f->highest_version); |
256 | fd->version = je32_to_cpu(rd->version); | 259 | fd->version = je32_to_cpu(rd->version); |
257 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 260 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
@@ -275,7 +278,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
275 | 278 | ||
276 | retried = 1; | 279 | retried = 1; |
277 | 280 | ||
278 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 281 | jffs2_dbg(1, "Retrying failed write.\n"); |
279 | 282 | ||
280 | jffs2_dbg_acct_sanity_check(c,jeb); | 283 | jffs2_dbg_acct_sanity_check(c,jeb); |
281 | jffs2_dbg_acct_paranoia_check(c, jeb); | 284 | jffs2_dbg_acct_paranoia_check(c, jeb); |
@@ -295,12 +298,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
295 | 298 | ||
296 | if (!ret) { | 299 | if (!ret) { |
297 | flash_ofs = write_ofs(c); | 300 | flash_ofs = write_ofs(c); |
298 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 301 | jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n", |
302 | flash_ofs); | ||
299 | jffs2_dbg_acct_sanity_check(c,jeb); | 303 | jffs2_dbg_acct_sanity_check(c,jeb); |
300 | jffs2_dbg_acct_paranoia_check(c, jeb); | 304 | jffs2_dbg_acct_paranoia_check(c, jeb); |
301 | goto retry; | 305 | goto retry; |
302 | } | 306 | } |
303 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 307 | jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", |
308 | ret); | ||
304 | } | 309 | } |
305 | /* Release the full_dnode which is now useless, and return */ | 310 | /* Release the full_dnode which is now useless, and return */ |
306 | jffs2_free_full_dirent(fd); | 311 | jffs2_free_full_dirent(fd); |
@@ -333,8 +338,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
333 | int ret = 0; | 338 | int ret = 0; |
334 | uint32_t writtenlen = 0; | 339 | uint32_t writtenlen = 0; |
335 | 340 | ||
336 | D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", | 341 | jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n", |
337 | f->inocache->ino, offset, writelen)); | 342 | __func__, f->inocache->ino, offset, writelen); |
338 | 343 | ||
339 | while(writelen) { | 344 | while(writelen) { |
340 | struct jffs2_full_dnode *fn; | 345 | struct jffs2_full_dnode *fn; |
@@ -345,12 +350,13 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
345 | int retried = 0; | 350 | int retried = 0; |
346 | 351 | ||
347 | retry: | 352 | retry: |
348 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); | 353 | jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", |
354 | writelen, offset); | ||
349 | 355 | ||
350 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, | 356 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, |
351 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 357 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
352 | if (ret) { | 358 | if (ret) { |
353 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); | 359 | jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret); |
354 | break; | 360 | break; |
355 | } | 361 | } |
356 | mutex_lock(&f->sem); | 362 | mutex_lock(&f->sem); |
@@ -386,7 +392,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
386 | if (!retried) { | 392 | if (!retried) { |
387 | /* Write error to be retried */ | 393 | /* Write error to be retried */ |
388 | retried = 1; | 394 | retried = 1; |
389 | D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n")); | 395 | jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n"); |
390 | goto retry; | 396 | goto retry; |
391 | } | 397 | } |
392 | break; | 398 | break; |
@@ -399,7 +405,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
399 | } | 405 | } |
400 | if (ret) { | 406 | if (ret) { |
401 | /* Eep */ | 407 | /* Eep */ |
402 | D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret)); | 408 | jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", |
409 | ret); | ||
403 | jffs2_mark_node_obsolete(c, fn->raw); | 410 | jffs2_mark_node_obsolete(c, fn->raw); |
404 | jffs2_free_full_dnode(fn); | 411 | jffs2_free_full_dnode(fn); |
405 | 412 | ||
@@ -414,7 +421,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
414 | ret = -EIO; | 421 | ret = -EIO; |
415 | break; | 422 | break; |
416 | } | 423 | } |
417 | D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen)); | 424 | jffs2_dbg(1, "increasing writtenlen by %d\n", datalen); |
418 | writtenlen += datalen; | 425 | writtenlen += datalen; |
419 | offset += datalen; | 426 | offset += datalen; |
420 | writelen -= datalen; | 427 | writelen -= datalen; |
@@ -439,7 +446,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
439 | */ | 446 | */ |
440 | ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, | 447 | ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, |
441 | JFFS2_SUMMARY_INODE_SIZE); | 448 | JFFS2_SUMMARY_INODE_SIZE); |
442 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); | 449 | jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen); |
443 | if (ret) | 450 | if (ret) |
444 | return ret; | 451 | return ret; |
445 | 452 | ||
@@ -450,11 +457,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
450 | 457 | ||
451 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); | 458 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); |
452 | 459 | ||
453 | D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n", | 460 | jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n", |
454 | jemode_to_cpu(ri->mode))); | 461 | jemode_to_cpu(ri->mode)); |
455 | 462 | ||
456 | if (IS_ERR(fn)) { | 463 | if (IS_ERR(fn)) { |
457 | D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n")); | 464 | jffs2_dbg(1, "jffs2_write_dnode() failed\n"); |
458 | /* Eeek. Wave bye bye */ | 465 | /* Eeek. Wave bye bye */ |
459 | mutex_unlock(&f->sem); | 466 | mutex_unlock(&f->sem); |
460 | jffs2_complete_reservation(c); | 467 | jffs2_complete_reservation(c); |
@@ -480,7 +487,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
480 | 487 | ||
481 | if (ret) { | 488 | if (ret) { |
482 | /* Eep. */ | 489 | /* Eep. */ |
483 | D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); | 490 | jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n"); |
484 | return ret; | 491 | return ret; |
485 | } | 492 | } |
486 | 493 | ||
@@ -597,8 +604,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
597 | !memcmp(fd->name, name, namelen) && | 604 | !memcmp(fd->name, name, namelen) && |
598 | !fd->name[namelen]) { | 605 | !fd->name[namelen]) { |
599 | 606 | ||
600 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", | 607 | jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n", |
601 | fd->ino, ref_offset(fd->raw))); | 608 | fd->ino, ref_offset(fd->raw)); |
602 | jffs2_mark_node_obsolete(c, fd->raw); | 609 | jffs2_mark_node_obsolete(c, fd->raw); |
603 | /* We don't want to remove it from the list immediately, | 610 | /* We don't want to remove it from the list immediately, |
604 | because that screws up getdents()/seek() semantics even | 611 | because that screws up getdents()/seek() semantics even |
@@ -630,8 +637,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
630 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", | 637 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", |
631 | dead_f->inocache->ino, fd->name, fd->ino); | 638 | dead_f->inocache->ino, fd->name, fd->ino); |
632 | } else { | 639 | } else { |
633 | D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", | 640 | jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n", |
634 | fd->name, dead_f->inocache->ino)); | 641 | fd->name, |
642 | dead_f->inocache->ino); | ||
635 | } | 643 | } |
636 | if (fd->raw) | 644 | if (fd->raw) |
637 | jffs2_mark_node_obsolete(c, fd->raw); | 645 | jffs2_mark_node_obsolete(c, fd->raw); |