author     Simon Kagstrom <simon.kagstrom@netinsight.net>   2009-11-03 08:19:03 -0500
committer  David Woodhouse <David.Woodhouse@intel.com>      2009-11-30 07:02:07 -0500
commit     2e386e4bac90554887e73d6f342e845185b33fc3 (patch)
tree       635a811a625aaf5ca96b9a391632aa59f8fe47f8
parent     9507b0c838e37651030d453b9cf3b136cfeefe89 (diff)
mtd: mtdoops: refactor as a kmsg_dumper
The last messages printed before a crash often contain interesting
information about the cause of the crash. This patch reworks mtdoops to
use the kmsg_dumper support instead of a console, which simplifies the
code and also captures the messages printed before the oops started.
On an oops callback, the MTD device write is scheduled on a work queue
(so that the regular mtd->write call can be used), while panics call
mtd->panic_write directly. Thus, if panic_on_oops is set, the oops will
be written out during the panic.
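
For reference, the kmsg_dumper interface used here can be exercised with
a minimal dumper along these lines (a hedged sketch, not part of this
patch; the example_* names are hypothetical, but the callback signature,
with the log handed over as two ring-buffer segments s1/l1 and s2/l2, is
the one mtdoops_do_dump() below implements):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>

/* Hypothetical minimal dumper; the kernel invokes it on oops/panic with
 * the last kernel messages split into two ring-buffer segments. */
static void example_do_dump(struct kmsg_dumper *dumper,
                enum kmsg_dump_reason reason, const char *s1,
                unsigned long l1, const char *s2, unsigned long l2)
{
        if (reason == KMSG_DUMP_PANIC) {
                /* Must not sleep here: write synchronously, as mtdoops
                 * does via mtd->panic_write */
                return;
        }
        /* Oops path: defer to process context (e.g. schedule_work())
         * so a regular, sleeping write can be used */
}

static struct kmsg_dumper example_dumper = {
        .dump = example_do_dump,
};

static int __init example_init(void)
{
        return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
        kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");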
A parameter selects which mtd device to use (by number or name), and a
flag, writable at runtime, toggles whether to dump oopses or only panics
(since oopses can often be handled by regular syslog).
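
For example, usage might look like this (hypothetical partition name;
since dump_oops is registered with mode 0600, it appears writable under
/sys/module/mtdoops/parameters/):

# log to the MTD partition named "oops", recording panics only
modprobe mtdoops mtddev=oops dump_oops=0

# re-enable oops dumping at runtime
echo 1 > /sys/module/mtdoops/parameters/dump_oops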
The patch was massaged and amended by Artem.
Signed-off-by: Simon Kagstrom <simon.kagstrom@netinsight.net>
Reviewed-by: Anders Grafstrom <anders.grafstrom@netinsight.net>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
-rw-r--r--  drivers/mtd/mtdoops.c | 235 ++++++++++-------------
1 file changed, 102 insertions(+), 133 deletions(-)
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 64772dc0ea2b..a714ec482761 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -29,21 +29,34 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/delay.h>
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/mtd/mtd.h>
+#include <linux/kmsg_dump.h>
 
 /* Maximum MTD partition size */
 #define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
 
 #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
+#define MTDOOPS_HEADER_SIZE 8
 
 static unsigned long record_size = 4096;
 module_param(record_size, ulong, 0400);
 MODULE_PARM_DESC(record_size,
                 "record size for MTD OOPS pages in bytes (default 4096)");
 
+static char mtddev[80];
+module_param_string(mtddev, mtddev, 80, 0400);
+MODULE_PARM_DESC(mtddev,
+                "name or index number of the MTD device to use");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+                "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
 static struct mtdoops_context {
+        struct kmsg_dumper dump;
+
         int mtd_index;
         struct work_struct work_erase;
         struct work_struct work_write;
@@ -52,14 +65,8 @@ static struct mtdoops_context {
         int nextpage;
         int nextcount;
         unsigned long *oops_page_used;
-        char *name;
 
         void *oops_buf;
-
-        /* writecount and disabling ready are spin lock protected */
-        spinlock_t writecount_lock;
-        int ready;
-        int writecount;
 } oops_cxt;
 
 static void mark_page_used(struct mtdoops_context *cxt, int page)
@@ -111,7 +118,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
                 remove_wait_queue(&wait_q, &wait);
                 printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
                        (unsigned long long)erase.addr,
-                       (unsigned long long)erase.len, mtd->name);
+                       (unsigned long long)erase.len, mtddev);
                 return ret;
         }
 
@@ -141,7 +148,6 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 
         printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
                cxt->nextpage, cxt->nextcount);
-        cxt->ready = 1;
 }
 
 /* Scheduled work - when we can't proceed without erasing a block */
@@ -190,7 +196,6 @@ badblock:
         if (ret >= 0) {
                 printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
                        cxt->nextpage, cxt->nextcount);
-                cxt->ready = 1;
                 return;
         }
 
@@ -208,11 +213,13 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 {
         struct mtd_info *mtd = cxt->mtd;
         size_t retlen;
+        u32 *hdr;
         int ret;
 
-        if (cxt->writecount < record_size)
-                memset(cxt->oops_buf + cxt->writecount, 0xff,
-                       record_size - cxt->writecount);
+        /* Add mtdoops header to the buffer */
+        hdr = cxt->oops_buf;
+        hdr[0] = cxt->nextcount;
+        hdr[1] = MTDOOPS_KERNMSG_MAGIC;
 
         if (panic)
                 ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
@@ -221,17 +228,15 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
                 ret = mtd->write(mtd, cxt->nextpage * record_size,
                                  record_size, &retlen, cxt->oops_buf);
 
-        cxt->writecount = 0;
-
         if (retlen != record_size || ret < 0)
                 printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
                        cxt->nextpage * record_size, retlen, record_size, ret);
         mark_page_used(cxt, cxt->nextpage);
+        memset(cxt->oops_buf, 0xff, record_size);
 
         mtdoops_inc_counter(cxt);
 }
 
-
 static void mtdoops_workfunc_write(struct work_struct *work)
 {
         struct mtdoops_context *cxt =
@@ -250,17 +255,18 @@ static void find_next_position(struct mtdoops_context *cxt)
         for (page = 0; page < cxt->oops_pages; page++) {
                 /* Assume the page is used */
                 mark_page_used(cxt, page);
-                ret = mtd->read(mtd, page * record_size, 8, &retlen, (u_char *) &count[0]);
-                if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
-                        printk(KERN_ERR "mtdoops: read failure at %ld (%td of 8 read), err %d\n",
-                               page * record_size, retlen, ret);
+                ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+                                &retlen, (u_char *) &count[0]);
+                if (retlen != MTDOOPS_HEADER_SIZE ||
+                    (ret < 0 && ret != -EUCLEAN)) {
+                        printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
+                               page * record_size, retlen,
+                               MTDOOPS_HEADER_SIZE, ret);
                         continue;
                 }
 
                 if (count[0] == 0xffffffff && count[1] == 0xffffffff)
                         mark_page_unused(cxt, page);
-                if (count[1] != MTDOOPS_KERNMSG_MAGIC)
-                        continue;
                 if (count[0] == 0xffffffff)
                         continue;
                 if (maxcount == 0xffffffff) {
@@ -291,15 +297,50 @@ static void find_next_position(struct mtdoops_context *cxt)
         mtdoops_inc_counter(cxt);
 }
 
+static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+                enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+                const char *s2, unsigned long l2)
+{
+        struct mtdoops_context *cxt = container_of(dumper,
+                        struct mtdoops_context, dump);
+        unsigned long s1_start, s2_start;
+        unsigned long l1_cpy, l2_cpy;
+        char *dst;
+
+        /* Only dump oopses if dump_oops is set */
+        if (reason == KMSG_DUMP_OOPS && !dump_oops)
+                return;
+
+        dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
+        l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
+        l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
+
+        s2_start = l2 - l2_cpy;
+        s1_start = l1 - l1_cpy;
+
+        memcpy(dst, s1 + s1_start, l1_cpy);
+        memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+
+        /* Panics must be written immediately */
+        if (reason == KMSG_DUMP_PANIC) {
+                if (!cxt->mtd->panic_write)
+                        printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+                else
+                        mtdoops_write(cxt, 1);
+                return;
+        }
+
+        /* For other cases, schedule work to write it "nicely" */
+        schedule_work(&cxt->work_write);
+}
 
 static void mtdoops_notify_add(struct mtd_info *mtd)
 {
         struct mtdoops_context *cxt = &oops_cxt;
-        u64 mtdoops_pages = mtd->size;
-
-        do_div(mtdoops_pages, record_size);
+        u64 mtdoops_pages = div_u64(mtd->size, record_size);
+        int err;
 
-        if (cxt->name && !strcmp(mtd->name, cxt->name))
+        if (!strcmp(mtd->name, mtddev))
                 cxt->mtd_index = mtd->index;
 
         if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
@@ -310,13 +351,11 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
                        mtd->index);
                 return;
         }
-
         if (mtd->erasesize < record_size) {
                 printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
                        mtd->index);
                 return;
         }
-
         if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
                 printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
                        mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
@@ -327,7 +366,16 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
         cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
                         BITS_PER_LONG));
         if (!cxt->oops_page_used) {
-                printk(KERN_ERR "Could not allocate page array\n");
+                printk(KERN_ERR "mtdoops: could not allocate page array\n");
+                return;
+        }
+
+        cxt->dump.dump = mtdoops_do_dump;
+        err = kmsg_dump_register(&cxt->dump);
+        if (err) {
+                printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
+                vfree(cxt->oops_page_used);
+                cxt->oops_page_used = NULL;
                 return;
         }
 
@@ -344,116 +392,29 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
         if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                 return;
 
+        if (kmsg_dump_unregister(&cxt->dump) < 0)
+                printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
+
         cxt->mtd = NULL;
         flush_scheduled_work();
 }
 
-static void mtdoops_console_sync(void)
-{
-        struct mtdoops_context *cxt = &oops_cxt;
-        struct mtd_info *mtd = cxt->mtd;
-        unsigned long flags;
-
-        if (!cxt->ready || !mtd || cxt->writecount == 0)
-                return;
-
-        /*
-         * Once ready is 0 and we've held the lock no further writes to the
-         * buffer will happen
-         */
-        spin_lock_irqsave(&cxt->writecount_lock, flags);
-        if (!cxt->ready) {
-                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-                return;
-        }
-        cxt->ready = 0;
-        spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-
-        if (mtd->panic_write && in_interrupt())
-                /* Interrupt context, we're going to panic so try and log */
-                mtdoops_write(cxt, 1);
-        else
-                schedule_work(&cxt->work_write);
-}
-
-static void
-mtdoops_console_write(struct console *co, const char *s, unsigned int count)
-{
-        struct mtdoops_context *cxt = co->data;
-        struct mtd_info *mtd = cxt->mtd;
-        unsigned long flags;
-
-        if (!oops_in_progress) {
-                mtdoops_console_sync();
-                return;
-        }
-
-        if (!cxt->ready || !mtd)
-                return;
-
-        /* Locking on writecount ensures sequential writes to the buffer */
-        spin_lock_irqsave(&cxt->writecount_lock, flags);
-
-        /* Check ready status didn't change whilst waiting for the lock */
-        if (!cxt->ready) {
-                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-                return;
-        }
-
-        if (cxt->writecount == 0) {
-                u32 *stamp = cxt->oops_buf;
-                *stamp++ = cxt->nextcount;
-                *stamp = MTDOOPS_KERNMSG_MAGIC;
-                cxt->writecount = 8;
-        }
-
-        if (count + cxt->writecount > record_size)
-                count = record_size - cxt->writecount;
-
-        memcpy(cxt->oops_buf + cxt->writecount, s, count);
-        cxt->writecount += count;
-
-        spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-
-        if (cxt->writecount == record_size)
-                mtdoops_console_sync();
-}
-
-static int __init mtdoops_console_setup(struct console *co, char *options)
-{
-        struct mtdoops_context *cxt = co->data;
-
-        if (cxt->mtd_index != -1 || cxt->name)
-                return -EBUSY;
-        if (options) {
-                cxt->name = kstrdup(options, GFP_KERNEL);
-                return 0;
-        }
-        if (co->index == -1)
-                return -EINVAL;
-
-        cxt->mtd_index = co->index;
-        return 0;
-}
 
 static struct mtd_notifier mtdoops_notifier = {
         .add    = mtdoops_notify_add,
         .remove = mtdoops_notify_remove,
 };
 
-static struct console mtdoops_console = {
-        .name    = "ttyMTD",
-        .write   = mtdoops_console_write,
-        .setup   = mtdoops_console_setup,
-        .unblank = mtdoops_console_sync,
-        .index   = -1,
-        .data    = &oops_cxt,
-};
-
-static int __init mtdoops_console_init(void)
+static int __init mtdoops_init(void)
 {
         struct mtdoops_context *cxt = &oops_cxt;
+        int mtd_index;
+        char *endp;
 
+        if (strlen(mtddev) == 0) {
+                printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
+                return -EINVAL;
+        }
         if ((record_size & 4095) != 0) {
                 printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
                 return -EINVAL;
@@ -462,36 +423,44 @@ static int __init mtdoops_console_init(void)
                 printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
                 return -EINVAL;
         }
+
+        /* Setup the MTD device to use */
         cxt->mtd_index = -1;
+        mtd_index = simple_strtoul(mtddev, &endp, 0);
+        if (*endp == '\0')
+                cxt->mtd_index = mtd_index;
+        if (cxt->mtd_index > MAX_MTD_DEVICES) {
+                printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
+                       mtd_index);
+                return -EINVAL;
+        }
+
         cxt->oops_buf = vmalloc(record_size);
         if (!cxt->oops_buf) {
                 printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
                 return -ENOMEM;
         }
+        memset(cxt->oops_buf, 0xff, record_size);
 
-        spin_lock_init(&cxt->writecount_lock);
         INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
         INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
 
-        register_console(&mtdoops_console);
         register_mtd_user(&mtdoops_notifier);
         return 0;
 }
 
-static void __exit mtdoops_console_exit(void)
+static void __exit mtdoops_exit(void)
 {
         struct mtdoops_context *cxt = &oops_cxt;
 
         unregister_mtd_user(&mtdoops_notifier);
-        unregister_console(&mtdoops_console);
-        kfree(cxt->name);
         vfree(cxt->oops_buf);
         vfree(cxt->oops_page_used);
 }
 
 
-subsys_initcall(mtdoops_console_init);
-module_exit(mtdoops_console_exit);
+module_init(mtdoops_init);
+module_exit(mtdoops_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");