author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-10-11 06:40:40 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>     2009-11-30 07:01:56 -0500
commit     a15b124fc4f15b2c4fc51669c936a30ce179d1f7
tree       7efd103ad36ec7bdbb564dcf26a5676d40bee192 /drivers/mtd/mtdoops.c
parent     456b565cc52fbcdaa2e19ffdf40d9dd3b726d603
mtd: mtdoops: several minor cleanups
While looking into the mtdoops module, I've spotted several minor
imperfections. This patch addresses them. Namely:
1. Remove several trailing white-spaces and tabs
2. Check the 'vmalloc()' return code straight away, not several lines
   below in the 'mtdoops_console_init()' function (a generic sketch of
   this pattern follows the sign-offs below).
3. Clean up printks - make them more consistent and use the same
code formatting style for them.
4. Remove silly style of putting brackets around everything in
"if" operators.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Cc: Simon Kagstrom <simon.kagstrom@netinsight.net>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
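Items 2 and 4 above boil down to the patterns sketched here. This is a generic, hypothetical init function (foo_init(), foo_buf and FOO_BUF_SIZE are made up for illustration), not the mtdoops code itself; the actual change is in the diff below.

    /* Illustrative sketch: check the vmalloc() result immediately and
     * keep "if" conditions free of redundant brackets. */
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/vmalloc.h>

    #define FOO_BUF_SIZE 4096

    static void *foo_buf;

    static int __init foo_init(void)
    {
    	foo_buf = vmalloc(FOO_BUF_SIZE);
    	if (!foo_buf) {
    		/* item 2: fail right after the allocation, not several lines later */
    		printk(KERN_ERR "foo: failed to allocate buffer\n");
    		return -ENOMEM;
    	}

    	/* item 4: write conditions as "a != b || c < 0",
    	 * not "((a != b) || (c < 0))" */
    	return 0;
    }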
Diffstat (limited to 'drivers/mtd/mtdoops.c')
-rw-r--r--    drivers/mtd/mtdoops.c    81
1 file changed, 40 insertions, 41 deletions
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1060337c06df..c383add060d8 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -81,9 +81,9 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	if (ret) {
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&wait_q, &wait);
-		printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
-				"on \"%s\" failed\n",
-			(unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
+		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+		       (unsigned long long)erase.addr,
+		       (unsigned long long)erase.len, mtd->name);
 		return ret;
 	}
 
@@ -109,10 +109,9 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 
 	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
 			&retlen, (u_char *) &count);
-	if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
-		printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
-				", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
-				retlen, ret);
+	if (retlen != 4 || (ret < 0 && ret != -EUCLEAN)) {
+		printk(KERN_ERR "mtdoops: read failure at %d (%td of 4 read), err %d\n",
+		       cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret);
 		schedule_work(&cxt->work_erase);
 		return;
 	}
@@ -123,8 +122,8 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 		return;
 	}
 
-	printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
-			cxt->nextpage, cxt->nextcount);
+	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
+	       cxt->nextpage, cxt->nextcount);
 	cxt->ready = 1;
 }
 
@@ -152,18 +151,18 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
 		if (!ret)
 			break;
 		if (ret < 0) {
-			printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
 			return;
 		}
 badblock:
-		printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
-			cxt->nextpage * OOPS_PAGE_SIZE);
+		printk(KERN_WARNING "mtdoops: bad block at %08x\n",
+		       cxt->nextpage * OOPS_PAGE_SIZE);
 		i++;
 		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
 		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
-		if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
-			printk(KERN_ERR "mtdoops: All blocks bad!\n");
+		if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) {
+			printk(KERN_ERR "mtdoops: all blocks bad!\n");
 			return;
 		}
 	}
@@ -172,15 +171,16 @@ badblock:
 	ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
 
 	if (ret >= 0) {
-		printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
+		       cxt->nextpage, cxt->nextcount);
 		cxt->ready = 1;
 		return;
 	}
 
-	if (mtd->block_markbad && (ret == -EIO)) {
+	if (mtd->block_markbad && ret == -EIO) {
 		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
 		if (ret < 0) {
-			printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
 			return;
 		}
 	}
@@ -206,9 +206,9 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 
 	cxt->writecount = 0;
 
-	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
-		printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
-			cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+	if (retlen != OOPS_PAGE_SIZE || ret < 0)
+		printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n",
+		       cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
 
 	mtdoops_inc_counter(cxt);
 }
@@ -220,7 +220,7 @@ static void mtdoops_workfunc_write(struct work_struct *work)
 		container_of(work, struct mtdoops_context, work_write);
 
 	mtdoops_write(cxt, 0);
-}	
+}
 
 static void find_next_position(struct mtdoops_context *cxt)
 {
@@ -231,9 +231,9 @@ static void find_next_position(struct mtdoops_context *cxt)
 
 	for (page = 0; page < cxt->oops_pages; page++) {
 		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
-		if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
-			printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
-					", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+		if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
+			printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n",
+			       page * OOPS_PAGE_SIZE, retlen, ret);
 			continue;
 		}
 
@@ -244,14 +244,14 @@ static void find_next_position(struct mtdoops_context *cxt)
 		if (maxcount == 0xffffffff) {
 			maxcount = count[0];
 			maxpos = page;
-		} else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
+		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
 			maxcount = count[0];
 			maxpos = page;
-		} else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
+		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
 			maxcount = count[0];
 			maxpos = page;
-		} else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
-				&& (maxcount > 0x80000000)) {
+		} else if (count[0] > maxcount && count[0] > 0xc0000000
+			   && maxcount > 0x80000000) {
 			maxcount = count[0];
 			maxpos = page;
 		}
@@ -277,18 +277,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 	if (cxt->name && !strcmp(mtd->name, cxt->name))
 		cxt->mtd_index = mtd->index;
 
-	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
+	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
 		return;
 
-	if (mtd->size < (mtd->erasesize * 2)) {
-		printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
-			mtd->index);
+	if (mtd->size < mtd->erasesize * 2) {
+		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
+		       mtd->index);
 		return;
 	}
 
 	if (mtd->erasesize < OOPS_PAGE_SIZE) {
-		printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
-			mtd->index);
+		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
+		       mtd->index);
 		return;
 	}
 
@@ -307,7 +307,7 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
 
-	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
+	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
 		return;
 
 	cxt->mtd = NULL;
@@ -323,8 +323,8 @@ static void mtdoops_console_sync(void)
 	if (!cxt->ready || !mtd || cxt->writecount == 0)
 		return;
 
-	/* 
-	 * Once ready is 0 and we've held the lock no further writes to the 
+	/*
+	 * Once ready is 0 and we've held the lock no further writes to the
 	 * buffer will happen
 	 */
 	spin_lock_irqsave(&cxt->writecount_lock, flags);
@@ -373,7 +373,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 		cxt->writecount = 8;
 	}
 
-	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
+	if (count + cxt->writecount > OOPS_PAGE_SIZE)
 		count = OOPS_PAGE_SIZE - cxt->writecount;
 
 	memcpy(cxt->oops_buf + cxt->writecount, s, count);
@@ -422,13 +422,12 @@ static int __init mtdoops_console_init(void)
 
 	cxt->mtd_index = -1;
 	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
-	spin_lock_init(&cxt->writecount_lock);
-
 	if (!cxt->oops_buf) {
-		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
+		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&cxt->writecount_lock);
 	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
 	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
 