Diffstat (limited to 'drivers/mtd/mtdoops.c')
 drivers/mtd/mtdoops.c | 185 ++++++++++++++++++++++++++++++++----------------
 1 file changed, 124 insertions(+), 61 deletions(-)
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f8af627f0b98..d3cf05012b46 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,26 @@
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
 #include <linux/mtd/mtd.h>
 
 #define OOPS_PAGE_SIZE 4096
 
-static struct mtdoops_context {
+struct mtdoops_context {
 	int mtd_index;
-	struct work_struct work;
+	struct work_struct work_erase;
+	struct work_struct work_write;
 	struct mtd_info *mtd;
 	int oops_pages;
 	int nextpage;
 	int nextcount;
 
 	void *oops_buf;
+
+	/* writecount and disabling ready are spin lock protected */
+	spinlock_t writecount_lock;
 	int ready;
 	int writecount;
 } oops_cxt;
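
The context now carries two work items so that erasing (which can sleep) and writing can be scheduled independently, plus a spinlock guarding writecount and the clearing of ready. Note that this hunk declares writecount_lock but no initialisation of it appears anywhere in the diff; a minimal sketch of what the init path would need, assuming it belongs in mtdoops_console_init():

    /* Hypothetical sketch, not part of this patch: the new spinlock
     * would have to be initialised before the console path takes it. */
    spin_lock_init(&oops_cxt.writecount_lock);
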
@@ -62,10 +69,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	erase.mtd = mtd;
 	erase.callback = mtdoops_erase_callback;
 	erase.addr = offset;
-	if (mtd->erasesize < OOPS_PAGE_SIZE)
-		erase.len = OOPS_PAGE_SIZE;
-	else
-		erase.len = mtd->erasesize;
+	erase.len = mtd->erasesize;
 	erase.priv = (u_long)&wait_q;
 
 	set_current_state(TASK_INTERRUPTIBLE);
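
For context: mtdoops_erase_block() issues an asynchronous mtd->erase and sleeps on a wait queue until the erase completion callback fires, with erase.priv carrying the wait queue to the callback. The callback body is outside this diff; a sketch of the handshake it implies:

    /* Sketch only: the completion handshake assumed above. The real
     * callback is not shown in this diff. */
    static void mtdoops_erase_callback(struct erase_info *done)
    {
            wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;

            wake_up(wait_q);
    }
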
@@ -87,7 +91,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	return 0;
 }
 
-static int mtdoops_inc_counter(struct mtdoops_context *cxt)
+static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 {
 	struct mtd_info *mtd = cxt->mtd;
 	size_t retlen;
@@ -103,25 +107,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
 
 	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
 			&retlen, (u_char *) &count);
-	if ((retlen != 4) || (ret < 0)) {
+	if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
 		printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
 				", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
 				retlen, ret);
-		return 1;
+		schedule_work(&cxt->work_erase);
+		return;
 	}
 
 	/* See if we need to erase the next block */
-	if (count != 0xffffffff)
-		return 1;
+	if (count != 0xffffffff) {
+		schedule_work(&cxt->work_erase);
+		return;
+	}
 
 	printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
 	       cxt->nextpage, cxt->nextcount);
 	cxt->ready = 1;
-	return 0;
 }
 
-static void mtdoops_prepare(struct mtdoops_context *cxt)
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
 {
+	struct mtdoops_context *cxt =
+			container_of(work, struct mtdoops_context, work_erase);
 	struct mtd_info *mtd = cxt->mtd;
 	int i = 0, j, ret, mod;
 
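
The reworked read check treats -EUCLEAN specially: that return code means ECC corrected bit-flips during the read, so the data in count is still trustworthy and only genuine failures divert to the erase worker. The idiom, restated outside the diff with pos standing in for cxt->nextpage * OOPS_PAGE_SIZE:

    /* Restated idiom: -EUCLEAN (corrected bit-flips) still delivered
     * valid data, so only real read errors reschedule the erase. */
    ret = mtd->read(mtd, pos, 4, &retlen, (u_char *)&count);
    if (retlen != 4 || (ret < 0 && ret != -EUCLEAN))
            schedule_work(&cxt->work_erase);
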
@@ -136,8 +145,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
 		cxt->nextpage = 0;
 	}
 
-	while (mtd->block_isbad &&
-	       mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
+	while (mtd->block_isbad) {
+		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		if (!ret)
+			break;
+		if (ret < 0) {
+			printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+			return;
+		}
 badblock:
 		printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
 			cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +169,72 @@ badblock:
 	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
 		ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
 
-	if (ret < 0) {
-		if (mtd->block_markbad)
-			mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
-		goto badblock;
+	if (ret >= 0) {
+		printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+		cxt->ready = 1;
+		return;
 	}
 
-	printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+	if (mtd->block_markbad && (ret == -EIO)) {
+		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		if (ret < 0) {
+			printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+			return;
+		}
+	}
+	goto badblock;
+}
 
-	cxt->ready = 1;
+static void mtdoops_write(struct mtdoops_context *cxt, int panic)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	size_t retlen;
+	int ret;
+
+	if (cxt->writecount < OOPS_PAGE_SIZE)
+		memset(cxt->oops_buf + cxt->writecount, 0xff,
+				OOPS_PAGE_SIZE - cxt->writecount);
+
+	if (panic)
+		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+				OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+	else
+		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+				OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+
+	cxt->writecount = 0;
+
+	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
+		printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
+			cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+
+	mtdoops_inc_counter(cxt);
 }
 
-static void mtdoops_workfunc(struct work_struct *work)
+
+static void mtdoops_workfunc_write(struct work_struct *work)
 {
 	struct mtdoops_context *cxt =
-			container_of(work, struct mtdoops_context, work);
+			container_of(work, struct mtdoops_context, work_write);
 
-	mtdoops_prepare(cxt);
+	mtdoops_write(cxt, 0);
 }
 
-static int find_next_position(struct mtdoops_context *cxt)
+static void find_next_position(struct mtdoops_context *cxt)
 {
 	struct mtd_info *mtd = cxt->mtd;
-	int page, maxpos = 0;
+	int ret, page, maxpos = 0;
 	u32 count, maxcount = 0xffffffff;
 	size_t retlen;
 
 	for (page = 0; page < cxt->oops_pages; page++) {
-		mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+		if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
+			printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
+				", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+			continue;
+		}
+
 		if (count == 0xffffffff)
 			continue;
 		if (maxcount == 0xffffffff) {
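
mtdoops_write() takes a panic flag so one routine serves both the workqueue path and the panic path; mtd->panic_write is the variant expected to complete without sleeping or relying on interrupts. Since panic_write is an optional method of struct mtd_info, the atomic path is only safe when the device provides it, which is why mtdoops_console_sync() further down tests mtd->panic_write before calling mtdoops_write(cxt, 1). A condensed sketch of that contract, with off and buf as stand-ins for cxt->nextpage * OOPS_PAGE_SIZE and cxt->oops_buf:

    /* Condensed sketch of the contract; off and buf are stand-ins,
     * not names from this patch. */
    if (panic) {
            if (!mtd->panic_write)
                    return; /* no safe way to write from atomic context */
            ret = mtd->panic_write(mtd, off, OOPS_PAGE_SIZE, &retlen, buf);
    } else {
            ret = mtd->write(mtd, off, OOPS_PAGE_SIZE, &retlen, buf);
    }
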
@@ -205,20 +258,19 @@ static int find_next_position(struct mtdoops_context *cxt)
 		cxt->ready = 1;
 		printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
 		       cxt->nextpage, cxt->nextcount);
-		return 0;
+		return;
 	}
 
 	cxt->nextpage = maxpos;
 	cxt->nextcount = maxcount;
 
-	return mtdoops_inc_counter(cxt);
+	mtdoops_inc_counter(cxt);
 }
 
 
 static void mtdoops_notify_add(struct mtd_info *mtd)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
-	int ret;
 
 	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
 		return;
@@ -229,14 +281,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 		return;
 	}
 
+	if (mtd->erasesize < OOPS_PAGE_SIZE) {
+		printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
+				mtd->index);
+		return;
+	}
+
 	cxt->mtd = mtd;
 	cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
 
-	ret = find_next_position(cxt);
-	if (ret == 1)
-		mtdoops_prepare(cxt);
+	find_next_position(cxt);
 
-	printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
 }
 
 static void mtdoops_notify_remove(struct mtd_info *mtd)
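
Rejecting partitions whose eraseblock is smaller than OOPS_PAGE_SIZE is what makes the earlier simplification to erase.len = mtd->erasesize safe: every erase now covers a whole number of records. Illustrative arithmetic only, with hypothetical geometry:

    /* Hypothetical geometry, for illustration only. */
    #define EXAMPLE_ERASESIZE (128 * 1024)  /* 128KiB eraseblock */
    #define EXAMPLE_PARTSIZE  (1024 * 1024) /* 1MiB partition */

    int records_per_block = EXAMPLE_ERASESIZE / OOPS_PAGE_SIZE; /* 32 */
    int oops_pages = EXAMPLE_PARTSIZE / OOPS_PAGE_SIZE;         /* 256 */
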
@@ -254,31 +310,28 @@ static void mtdoops_console_sync(void)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
 	struct mtd_info *mtd = cxt->mtd;
-	size_t retlen;
-	int ret;
+	unsigned long flags;
 
-	if (!cxt->ready || !mtd)
+	if (!cxt->ready || !mtd || cxt->writecount == 0)
 		return;
 
-	if (cxt->writecount == 0)
+	/*
+	 * Once ready is 0 and we've held the lock no further writes to the
+	 * buffer will happen
+	 */
+	spin_lock_irqsave(&cxt->writecount_lock, flags);
+	if (!cxt->ready) {
+		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 		return;
-
-	if (cxt->writecount < OOPS_PAGE_SIZE)
-		memset(cxt->oops_buf + cxt->writecount, 0xff,
-				OOPS_PAGE_SIZE - cxt->writecount);
-
-	ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-			OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+	}
 	cxt->ready = 0;
-	cxt->writecount = 0;
-
-	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
-		printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
-			cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 
-	ret = mtdoops_inc_counter(cxt);
-	if (ret == 1)
-		schedule_work(&cxt->work);
+	if (mtd->panic_write && in_interrupt())
+		/* Interrupt context, we're going to panic so try and log */
+		mtdoops_write(cxt, 1);
+	else
+		schedule_work(&cxt->work_write);
 }
 
 static void
@@ -286,7 +339,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 {
 	struct mtdoops_context *cxt = co->data;
 	struct mtd_info *mtd = cxt->mtd;
-	int i;
+	unsigned long flags;
 
 	if (!oops_in_progress) {
 		mtdoops_console_sync();
@@ -296,6 +349,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 	if (!cxt->ready || !mtd)
 		return;
 
+	/* Locking on writecount ensures sequential writes to the buffer */
+	spin_lock_irqsave(&cxt->writecount_lock, flags);
+
+	/* Check ready status didn't change whilst waiting for the lock */
+	if (!cxt->ready)
+		return;
+
 	if (cxt->writecount == 0) {
 		u32 *stamp = cxt->oops_buf;
 		*stamp = cxt->nextcount;
@@ -305,10 +365,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
 		count = OOPS_PAGE_SIZE - cxt->writecount;
 
-	for (i = 0; i < count; i++, s++)
-		*((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
+	memcpy(cxt->oops_buf + cxt->writecount, s, count);
+	cxt->writecount += count;
 
-	cxt->writecount = cxt->writecount + count;
+	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+
+	if (cxt->writecount == OOPS_PAGE_SIZE)
+		mtdoops_console_sync();
 }
 
 static int __init mtdoops_console_setup(struct console *co, char *options)
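
One caveat in the hunk above: the re-check of cxt->ready in mtdoops_console_write() returns while writecount_lock is still held, so that early-exit path leaks the spinlock. A sketch of the presumably intended form:

    /* Presumed intent: release the lock before bailing out. */
    if (!cxt->ready) {
            spin_unlock_irqrestore(&cxt->writecount_lock, flags);
            return;
    }
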
@@ -334,7 +397,6 @@ static struct console mtdoops_console = {
 	.write		= mtdoops_console_write,
 	.setup		= mtdoops_console_setup,
 	.unblank	= mtdoops_console_sync,
-	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
 	.data		= &oops_cxt,
 };
@@ -347,11 +409,12 @@ static int __init mtdoops_console_init(void)
 	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
 
 	if (!cxt->oops_buf) {
-		printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
 		return -ENOMEM;
 	}
 
-	INIT_WORK(&cxt->work, mtdoops_workfunc);
+	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
 
 	register_console(&mtdoops_console);
 	register_mtd_user(&mtdoops_notifier);