Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig       8
-rw-r--r--  drivers/mtd/Makefile      1
-rw-r--r--  drivers/mtd/mtdoops.c   365
3 files changed, 374 insertions, 0 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index fbec8cd55e38..8848e8ac705d 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -278,6 +278,14 @@ config SSFDC
 	  This enables read only access to SmartMedia formatted NAND
 	  flash. You can mount it with FAT file system.
 
+config MTD_OOPS
+	tristate "Log panic/oops to an MTD buffer"
+	depends on MTD
+	help
+	  This enables panic and oops messages to be logged to a circular
+	  buffer in a flash partition where it can be read back at some
+	  later point.
+
 source "drivers/mtd/chips/Kconfig"
 
 source "drivers/mtd/maps/Kconfig"
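
[Editorial note, not part of the patch: the help text above describes a circular buffer of records in flash. As implied by the driver added below, each record occupies one 4096-byte page, starting with a 32-bit sequence counter followed by raw printk text padded with 0xff. A rough C sketch of that layout (the struct name and field names are illustrative only; the driver does not define such a struct):

#include <stdint.h>

struct mtdoops_record {
	uint32_t count;            /* sequence number; 0xffffffff marks an unused/erased page */
	char     text[4096 - 4];   /* printk output, padded to the end of the page with 0xff */
};
]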
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 451adcc52b3c..024d0e5e3e5d 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_NFTL) += nftl.o
 obj-$(CONFIG_INFTL)		+= inftl.o
 obj-$(CONFIG_RFD_FTL)		+= rfd_ftl.o
 obj-$(CONFIG_SSFDC)		+= ssfdc.o
+obj-$(CONFIG_MTD_OOPS)		+= mtdoops.o
 
 nftl-objs		:= nftlcore.o nftlmount.o
 inftl-objs		:= inftlcore.o inftlmount.o
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
new file mode 100644
index 000000000000..cfc28ab4a3dc
--- /dev/null
+++ b/drivers/mtd/mtdoops.c
@@ -0,0 +1,365 @@
+/*
+ * MTD Oops/Panic logger
+ *
+ * Copyright (C) 2007 Nokia Corporation. All rights reserved.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mtd/mtd.h>
+
+#define OOPS_PAGE_SIZE 4096
+
+static struct mtdoops_context {
+	int mtd_index;
+	struct work_struct work;
+	struct mtd_info *mtd;
+	int oops_pages;
+	int nextpage;
+	int nextcount;
+
+	void *oops_buf;
+	int ready;
+	int writecount;
+} oops_cxt;
+
+static void mtdoops_erase_callback(struct erase_info *done)
+{
+	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
+	wake_up(wait_q);
+}
+
+static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
+{
+	struct erase_info erase;
+	DECLARE_WAITQUEUE(wait, current);
+	wait_queue_head_t wait_q;
+	int ret;
+
+	init_waitqueue_head(&wait_q);
+	erase.mtd = mtd;
+	erase.callback = mtdoops_erase_callback;
+	erase.addr = offset;
+	if (mtd->erasesize < OOPS_PAGE_SIZE)
+		erase.len = OOPS_PAGE_SIZE;
+	else
+		erase.len = mtd->erasesize;
+	erase.priv = (u_long)&wait_q;
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	add_wait_queue(&wait_q, &wait);
+
+	ret = mtd->erase(mtd, &erase);
+	if (ret) {
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&wait_q, &wait);
+		printk (KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] "
+				     "on \"%s\" failed\n",
+			erase.addr, erase.len, mtd->name);
+		return ret;
+	}
+
+	schedule();  /* Wait for erase to finish. */
+	remove_wait_queue(&wait_q, &wait);
+
+	return 0;
+}
+
+static int mtdoops_inc_counter(struct mtdoops_context *cxt)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	size_t retlen;
+	u32 count;
+	int ret;
+
+	cxt->nextpage++;
+	if (cxt->nextpage > cxt->oops_pages)
+		cxt->nextpage = 0;
+	cxt->nextcount++;
+	if (cxt->nextcount == 0xffffffff)
+		cxt->nextcount = 0;
+
+	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
+			&retlen, (u_char *) &count);
+	if ((retlen != 4) || (ret < 0)) {
+		printk(KERN_ERR "mtdoops: Read failure at %d (%d of 4 read)"
+				", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
+				retlen, ret);
+		return 1;
+	}
+
+	/* See if we need to erase the next block */
+	if (count != 0xffffffff)
+		return 1;
+
+	printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
+			cxt->nextpage, cxt->nextcount);
+	cxt->ready = 1;
+	return 0;
+}
+
+static void mtdoops_prepare(struct mtdoops_context *cxt)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	int i = 0, j, ret, mod;
+
+	/* We were unregistered */
+	if (!mtd)
+		return;
+
+	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
+	if (mod != 0) {
+		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
+		if (cxt->nextpage > cxt->oops_pages)
+			cxt->nextpage = 0;
+	}
+
+	while (mtd->block_isbad &&
+		mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
+badblock:
+		printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
+			cxt->nextpage * OOPS_PAGE_SIZE);
+		i++;
+		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
+		if (cxt->nextpage > cxt->oops_pages)
+			cxt->nextpage = 0;
+		if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
+			printk(KERN_ERR "mtdoops: All blocks bad!\n");
+			return;
+		}
+	}
+
+	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
+		ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+
+	if (ret < 0) {
+		if (mtd->block_markbad)
+			mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		goto badblock;
+	}
+
+	printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+
+	cxt->ready = 1;
+}
+
+static void mtdoops_workfunc(struct work_struct *work)
+{
+	struct mtdoops_context *cxt =
+			container_of(work, struct mtdoops_context, work);
+
+	mtdoops_prepare(cxt);
+}
+
+static int find_next_position(struct mtdoops_context *cxt)
+{
+	struct mtd_info *mtd = cxt->mtd;
+	int page, maxpos = 0;
+	u32 count, maxcount = 0xffffffff;
+	size_t retlen;
+
+	for (page = 0; page < cxt->oops_pages; page++) {
+		mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+		if (count == 0xffffffff)
+			continue;
+		if (maxcount == 0xffffffff) {
+			maxcount = count;
+			maxpos = page;
+		} else if ((count < 0x40000000) && (maxcount > 0xc0000000)) {
+			maxcount = count;
+			maxpos = page;
+		} else if ((count > maxcount) && (count < 0xc0000000)) {
+			maxcount = count;
+			maxpos = page;
+		} else if ((count > maxcount) && (count > 0xc0000000)
+					&& (maxcount > 0x80000000)) {
+			maxcount = count;
+			maxpos = page;
+		}
+	}
+	if (maxcount == 0xffffffff) {
+		cxt->nextpage = 0;
+		cxt->nextcount = 1;
+		cxt->ready = 1;
+		printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
+				cxt->nextpage, cxt->nextcount);
+		return 0;
+	}
+
+	cxt->nextpage = maxpos;
+	cxt->nextcount = maxcount;
+
+	return mtdoops_inc_counter(cxt);
+}
+
+
+static void mtdoops_notify_add(struct mtd_info *mtd)
+{
+	struct mtdoops_context *cxt = &oops_cxt;
+	int ret;
+
+	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
+		return;
+
+	if (mtd->size < (mtd->erasesize * 2)) {
+		printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
+				mtd->index);
+		return;
+	}
+
+	cxt->mtd = mtd;
+	cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
+
+	ret = find_next_position(cxt);
+	if (ret == 1)
+		mtdoops_prepare(cxt);
+
+	printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+}
+
+static void mtdoops_notify_remove(struct mtd_info *mtd)
+{
+	struct mtdoops_context *cxt = &oops_cxt;
+
+	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
+		return;
+
+	cxt->mtd = NULL;
+	flush_scheduled_work();
+}
+
+
+static void
+mtdoops_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct mtdoops_context *cxt = co->data;
+	struct mtd_info *mtd = cxt->mtd;
+	int i, ret;
+
+	if (!cxt->ready || !mtd)
+		return;
+
+	if (!oops_in_progress && cxt->writecount != 0) {
+		size_t retlen;
+		if (cxt->writecount < OOPS_PAGE_SIZE)
+			memset(cxt->oops_buf + cxt->writecount, 0xff,
+					OOPS_PAGE_SIZE - cxt->writecount);
+
+		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+		cxt->ready = 0;
+		cxt->writecount = 0;
+
+		if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
+			printk(KERN_ERR "mtdoops: Write failure at %d (%d of %d"
+					" written), err %d.\n",
+					cxt->nextpage * OOPS_PAGE_SIZE, retlen,
+					OOPS_PAGE_SIZE, ret);
+
+		ret = mtdoops_inc_counter(cxt);
+		if (ret == 1)
+			schedule_work(&cxt->work);
+	}
+
+	if (!oops_in_progress)
+		return;
+
+	if (cxt->writecount == 0) {
+		u32 *stamp = cxt->oops_buf;
+		*stamp = cxt->nextcount;
+		cxt->writecount = 4;
+	}
+
+	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
+		count = OOPS_PAGE_SIZE - cxt->writecount;
+
+	for (i = 0; i < count; i++, s++)
+		*((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
+
+	cxt->writecount = cxt->writecount + count;
+}
+
+static int __init mtdoops_console_setup(struct console *co, char *options)
+{
+	struct mtdoops_context *cxt = co->data;
+
+	if (cxt->mtd_index != -1)
+		return -EBUSY;
+	if (co->index == -1)
+		return -EINVAL;
+
+	cxt->mtd_index = co->index;
+	return 0;
+}
+
+static struct mtd_notifier mtdoops_notifier = {
+	.add	= mtdoops_notify_add,
+	.remove	= mtdoops_notify_remove,
+};
+
+static struct console mtdoops_console = {
+	.name		= "ttyMTD",
+	.write		= mtdoops_console_write,
+	.setup		= mtdoops_console_setup,
+	.flags		= CON_PRINTBUFFER,
+	.index		= -1,
+	.data		= &oops_cxt,
+};
+
+static int __init mtdoops_console_init(void)
+{
+	struct mtdoops_context *cxt = &oops_cxt;
+
+	cxt->mtd_index = -1;
+	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
+
+	if (!cxt->oops_buf) {
+		printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&cxt->work, mtdoops_workfunc);
+
+	register_console(&mtdoops_console);
+	register_mtd_user(&mtdoops_notifier);
+	return 0;
+}
+
+static void __exit mtdoops_console_exit(void)
+{
+	struct mtdoops_context *cxt = &oops_cxt;
+
+	unregister_mtd_user(&mtdoops_notifier);
+	unregister_console(&mtdoops_console);
+	vfree(cxt->oops_buf);
+}
+
+
+subsys_initcall(mtdoops_console_init);
+module_exit(mtdoops_console_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
+MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
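
[Editorial note, not part of the patch: the logger is selected at boot with a console= parameter naming the console above, e.g. console=ttyMTD2 to log to MTD device 2; mtdoops_console_setup() records co->index as the target mtd_index and the MTD notifier then binds to the matching device when it appears. As a rough illustration of reading the records back from userspace, the sketch below scans the partition, picks the page with the largest valid sequence counter, and dumps its text. It assumes the log lives on /dev/mtd2 (a hypothetical device name) and, for simplicity, ignores the counter wrap-around cases that find_next_position() in the driver handles.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

#define PAGE_SZ 4096

int main(void)
{
	int fd = open("/dev/mtd2", O_RDONLY);	/* hypothetical log partition */
	unsigned char page[PAGE_SZ];
	uint32_t count, best = 0;
	off_t off, best_off = -1;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Scan one 4096-byte record at a time until the end of the device. */
	for (off = 0; pread(fd, page, PAGE_SZ, off) == PAGE_SZ; off += PAGE_SZ) {
		memcpy(&count, page, 4);	/* first 4 bytes hold the sequence counter */
		if (count == 0xffffffff)	/* erased page: no record here */
			continue;
		if (best_off < 0 || count > best) {	/* keep the newest (largest) counter */
			best = count;
			best_off = off;
		}
	}
	if (best_off < 0) {
		fprintf(stderr, "no mtdoops records found\n");
		close(fd);
		return 1;
	}
	pread(fd, page, PAGE_SZ, best_off);
	fwrite(page + 4, 1, PAGE_SZ - 4, stdout);	/* record text follows the counter; 0xff padding may trail it */
	close(fd);
	return 0;
}
]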