Diffstat (limited to 'drivers/mtd/mtdoops.c')
 -rw-r--r--  drivers/mtd/mtdoops.c | 389
 1 file changed, 200 insertions(+), 189 deletions(-)
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1060337c06df..a714ec482761 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -29,14 +29,34 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/delay.h>
-#include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/mtd/mtd.h>
+#include <linux/kmsg_dump.h>
+
+/* Maximum MTD partition size */
+#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
 
 #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
-#define OOPS_PAGE_SIZE 4096
+#define MTDOOPS_HEADER_SIZE 8
+
+static unsigned long record_size = 4096;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+		"record size for MTD OOPS pages in bytes (default 4096)");
+
+static char mtddev[80];
+module_param_string(mtddev, mtddev, 80, 0400);
+MODULE_PARM_DESC(mtddev,
+		"name or index number of the MTD device to use");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+		"set to 1 to dump oopses, 0 to only dump panics (default 1)");
 
 static struct mtdoops_context {
+	struct kmsg_dumper dump;
+
 	int mtd_index;
 	struct work_struct work_erase;
 	struct work_struct work_write;
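The three module parameters introduced above replace the old console-based configuration (previously selected via the ttyMTD console). As a usage sketch, with a hypothetical partition name, the module could be loaded as:

  # modprobe mtdoops mtddev=oops_part record_size=8192 dump_oops=0

record_size must be a multiple of 4096 and at least 4096 bytes, and mtddev may also be given as a device number; both constraints are enforced in mtdoops_init() further down.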
@@ -44,28 +64,43 @@ static struct mtdoops_context {
 	int oops_pages;
 	int nextpage;
 	int nextcount;
-	char *name;
+	unsigned long *oops_page_used;
 
 	void *oops_buf;
-
-	/* writecount and disabling ready are spin lock protected */
-	spinlock_t writecount_lock;
-	int ready;
-	int writecount;
 } oops_cxt;
 
+static void mark_page_used(struct mtdoops_context *cxt, int page)
+{
+	set_bit(page, cxt->oops_page_used);
+}
+
+static void mark_page_unused(struct mtdoops_context *cxt, int page)
+{
+	clear_bit(page, cxt->oops_page_used);
+}
+
+static int page_is_used(struct mtdoops_context *cxt, int page)
+{
+	return test_bit(page, cxt->oops_page_used);
+}
+
 static void mtdoops_erase_callback(struct erase_info *done)
 {
 	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
 	wake_up(wait_q);
 }
 
-static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
+static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
 {
+	struct mtd_info *mtd = cxt->mtd;
+	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
+	u32 start_page = start_page_offset / record_size;
+	u32 erase_pages = mtd->erasesize / record_size;
 	struct erase_info erase;
 	DECLARE_WAITQUEUE(wait, current);
 	wait_queue_head_t wait_q;
 	int ret;
+	int page;
 
 	init_waitqueue_head(&wait_q);
 	erase.mtd = mtd;
@@ -81,25 +116,24 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
 	if (ret) {
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&wait_q, &wait);
-		printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
-				"on \"%s\" failed\n",
-			(unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
+		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+		       (unsigned long long)erase.addr,
+		       (unsigned long long)erase.len, mtddev);
 		return ret;
 	}
 
 	schedule();  /* Wait for erase to finish. */
 	remove_wait_queue(&wait_q, &wait);
 
+	/* Mark pages as unused */
+	for (page = start_page; page < start_page + erase_pages; page++)
+		mark_page_unused(cxt, page);
+
 	return 0;
 }
 
 static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 {
-	struct mtd_info *mtd = cxt->mtd;
-	size_t retlen;
-	u32 count;
-	int ret;
-
 	cxt->nextpage++;
 	if (cxt->nextpage >= cxt->oops_pages)
 		cxt->nextpage = 0;
@@ -107,25 +141,13 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 	if (cxt->nextcount == 0xffffffff)
 		cxt->nextcount = 0;
 
-	ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
-			&retlen, (u_char *) &count);
-	if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
-		printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
-				", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
-				retlen, ret);
+	if (page_is_used(cxt, cxt->nextpage)) {
 		schedule_work(&cxt->work_erase);
 		return;
 	}
 
-	/* See if we need to erase the next block */
-	if (count != 0xffffffff) {
-		schedule_work(&cxt->work_erase);
-		return;
-	}
-
-	printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
-			cxt->nextpage, cxt->nextcount);
-	cxt->ready = 1;
+	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
+	       cxt->nextpage, cxt->nextcount);
 }
 
 /* Scheduled work - when we can't proceed without erasing a block */
@@ -140,47 +162,47 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
 	if (!mtd)
 		return;
 
-	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
+	mod = (cxt->nextpage * record_size) % mtd->erasesize;
 	if (mod != 0) {
-		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
+		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
 		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
 	}
 
 	while (mtd->block_isbad) {
-		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
 		if (!ret)
 			break;
 		if (ret < 0) {
-			printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
 			return;
 		}
 badblock:
-		printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
-				cxt->nextpage * OOPS_PAGE_SIZE);
+		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
+		       cxt->nextpage * record_size);
 		i++;
-		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
+		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
 		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
-		if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
-			printk(KERN_ERR "mtdoops: All blocks bad!\n");
+		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
+			printk(KERN_ERR "mtdoops: all blocks bad!\n");
 			return;
 		}
 	}
 
 	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
-		ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
 
 	if (ret >= 0) {
-		printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
-		cxt->ready = 1;
+		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
+		       cxt->nextpage, cxt->nextcount);
 		return;
 	}
 
-	if (mtd->block_markbad && (ret == -EIO)) {
-		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+	if (mtd->block_markbad && ret == -EIO) {
+		ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
 		if (ret < 0) {
-			printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
 			return;
 		}
 	}
@@ -191,36 +213,37 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 {
 	struct mtd_info *mtd = cxt->mtd;
 	size_t retlen;
+	u32 *hdr;
 	int ret;
 
-	if (cxt->writecount < OOPS_PAGE_SIZE)
-		memset(cxt->oops_buf + cxt->writecount, 0xff,
-					OOPS_PAGE_SIZE - cxt->writecount);
+	/* Add mtdoops header to the buffer */
+	hdr = cxt->oops_buf;
+	hdr[0] = cxt->nextcount;
+	hdr[1] = MTDOOPS_KERNMSG_MAGIC;
 
 	if (panic)
-		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+		ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
+				       record_size, &retlen, cxt->oops_buf);
 	else
-		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
-
-	cxt->writecount = 0;
+		ret = mtd->write(mtd, cxt->nextpage * record_size,
+				 record_size, &retlen, cxt->oops_buf);
 
-	if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
-		printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
-			cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+	if (retlen != record_size || ret < 0)
+		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
+		       cxt->nextpage * record_size, retlen, record_size, ret);
+	mark_page_used(cxt, cxt->nextpage);
+	memset(cxt->oops_buf, 0xff, record_size);
 
 	mtdoops_inc_counter(cxt);
 }
 
-
 static void mtdoops_workfunc_write(struct work_struct *work)
 {
 	struct mtdoops_context *cxt =
 		container_of(work, struct mtdoops_context, work_write);
 
 	mtdoops_write(cxt, 0);
 }
 
 static void find_next_position(struct mtdoops_context *cxt)
 {
@@ -230,28 +253,33 @@ static void find_next_position(struct mtdoops_context *cxt)
 	size_t retlen;
 
 	for (page = 0; page < cxt->oops_pages; page++) {
-		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
-		if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
-			printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
-				", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+		/* Assume the page is used */
+		mark_page_used(cxt, page);
+		ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+				&retlen, (u_char *) &count[0]);
+		if (retlen != MTDOOPS_HEADER_SIZE ||
+				(ret < 0 && ret != -EUCLEAN)) {
+			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
+			       page * record_size, retlen,
+			       MTDOOPS_HEADER_SIZE, ret);
 			continue;
 		}
 
-		if (count[1] != MTDOOPS_KERNMSG_MAGIC)
-			continue;
+		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
+			mark_page_unused(cxt, page);
 		if (count[0] == 0xffffffff)
 			continue;
 		if (maxcount == 0xffffffff) {
 			maxcount = count[0];
 			maxpos = page;
-		} else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
+		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
 			maxcount = count[0];
 			maxpos = page;
-		} else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
+		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
 			maxcount = count[0];
 			maxpos = page;
-		} else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
-					&& (maxcount > 0x80000000)) {
+		} else if (count[0] > maxcount && count[0] > 0xc0000000
+					&& maxcount > 0x80000000) {
 			maxcount = count[0];
 			maxpos = page;
 		}
@@ -269,187 +297,170 @@ static void find_next_position(struct mtdoops_context *cxt)
 	mtdoops_inc_counter(cxt);
 }
 
-
-static void mtdoops_notify_add(struct mtd_info *mtd)
+static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
+		const char *s2, unsigned long l2)
 {
-	struct mtdoops_context *cxt = &oops_cxt;
+	struct mtdoops_context *cxt = container_of(dumper,
+			struct mtdoops_context, dump);
+	unsigned long s1_start, s2_start;
+	unsigned long l1_cpy, l2_cpy;
+	char *dst;
+
+	/* Only dump oopses if dump_oops is set */
+	if (reason == KMSG_DUMP_OOPS && !dump_oops)
+		return;
 
-	if (cxt->name && !strcmp(mtd->name, cxt->name))
-		cxt->mtd_index = mtd->index;
+	dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
+	l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
+	l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
 
-	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
-		return;
+	s2_start = l2 - l2_cpy;
+	s1_start = l1 - l1_cpy;
 
-	if (mtd->size < (mtd->erasesize * 2)) {
-		printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
-			mtd->index);
-		return;
-	}
+	memcpy(dst, s1 + s1_start, l1_cpy);
+	memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
 
-	if (mtd->erasesize < OOPS_PAGE_SIZE) {
-		printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
-			mtd->index);
+	/* Panics must be written immediately */
+	if (reason == KMSG_DUMP_PANIC) {
+		if (!cxt->mtd->panic_write)
+			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+		else
+			mtdoops_write(cxt, 1);
 		return;
 	}
 
-	cxt->mtd = mtd;
-	if (mtd->size > INT_MAX)
-		cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE;
-	else
-		cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
-
-	find_next_position(cxt);
-
-	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
+	/* For other cases, schedule work to write it "nicely" */
+	schedule_work(&cxt->work_write);
 }
 
-static void mtdoops_notify_remove(struct mtd_info *mtd)
+static void mtdoops_notify_add(struct mtd_info *mtd)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
+	u64 mtdoops_pages = div_u64(mtd->size, record_size);
+	int err;
 
-	if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
-		return;
-
-	cxt->mtd = NULL;
-	flush_scheduled_work();
-}
-
-static void mtdoops_console_sync(void)
-{
-	struct mtdoops_context *cxt = &oops_cxt;
-	struct mtd_info *mtd = cxt->mtd;
-	unsigned long flags;
+	if (!strcmp(mtd->name, mtddev))
+		cxt->mtd_index = mtd->index;
 
-	if (!cxt->ready || !mtd || cxt->writecount == 0)
+	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
 		return;
 
-	/*
-	 * Once ready is 0 and we've held the lock no further writes to the
-	 * buffer will happen
-	 */
-	spin_lock_irqsave(&cxt->writecount_lock, flags);
-	if (!cxt->ready) {
-		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+	if (mtd->size < mtd->erasesize * 2) {
+		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
+		       mtd->index);
 		return;
 	}
-	cxt->ready = 0;
-	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-
-	if (mtd->panic_write && in_interrupt())
-		/* Interrupt context, we're going to panic so try and log */
-		mtdoops_write(cxt, 1);
-	else
-		schedule_work(&cxt->work_write);
-}
-
-static void
-mtdoops_console_write(struct console *co, const char *s, unsigned int count)
-{
-	struct mtdoops_context *cxt = co->data;
-	struct mtd_info *mtd = cxt->mtd;
-	unsigned long flags;
-
-	if (!oops_in_progress) {
-		mtdoops_console_sync();
+	if (mtd->erasesize < record_size) {
+		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
+		       mtd->index);
 		return;
 	}
-
-	if (!cxt->ready || !mtd)
+	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
+		printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
+		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
 		return;
+	}
 
-	/* Locking on writecount ensures sequential writes to the buffer */
-	spin_lock_irqsave(&cxt->writecount_lock, flags);
-
-	/* Check ready status didn't change whilst waiting for the lock */
-	if (!cxt->ready) {
-		spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+	/* oops_page_used is a bit field */
+	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
+			BITS_PER_LONG));
+	if (!cxt->oops_page_used) {
+		printk(KERN_ERR "mtdoops: could not allocate page array\n");
 		return;
 	}
 
-	if (cxt->writecount == 0) {
-		u32 *stamp = cxt->oops_buf;
-		*stamp++ = cxt->nextcount;
-		*stamp = MTDOOPS_KERNMSG_MAGIC;
-		cxt->writecount = 8;
+	cxt->dump.dump = mtdoops_do_dump;
+	err = kmsg_dump_register(&cxt->dump);
+	if (err) {
+		printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
+		vfree(cxt->oops_page_used);
+		cxt->oops_page_used = NULL;
+		return;
 	}
 
-	if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
-		count = OOPS_PAGE_SIZE - cxt->writecount;
-
-	memcpy(cxt->oops_buf + cxt->writecount, s, count);
-	cxt->writecount += count;
-
-	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
-
-	if (cxt->writecount == OOPS_PAGE_SIZE)
-		mtdoops_console_sync();
+	cxt->mtd = mtd;
+	cxt->oops_pages = (int)mtd->size / record_size;
+	find_next_position(cxt);
+	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
 }
 
-static int __init mtdoops_console_setup(struct console *co, char *options)
+static void mtdoops_notify_remove(struct mtd_info *mtd)
 {
-	struct mtdoops_context *cxt = co->data;
+	struct mtdoops_context *cxt = &oops_cxt;
 
-	if (cxt->mtd_index != -1 || cxt->name)
-		return -EBUSY;
-	if (options) {
-		cxt->name = kstrdup(options, GFP_KERNEL);
-		return 0;
-	}
-	if (co->index == -1)
-		return -EINVAL;
+	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
+		return;
 
-	cxt->mtd_index = co->index;
-	return 0;
+	if (kmsg_dump_unregister(&cxt->dump) < 0)
+		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
+
+	cxt->mtd = NULL;
+	flush_scheduled_work();
 }
 
+
 static struct mtd_notifier mtdoops_notifier = {
 	.add	= mtdoops_notify_add,
 	.remove	= mtdoops_notify_remove,
 };
 
-static struct console mtdoops_console = {
-	.name		= "ttyMTD",
-	.write		= mtdoops_console_write,
-	.setup		= mtdoops_console_setup,
-	.unblank	= mtdoops_console_sync,
-	.index		= -1,
-	.data		= &oops_cxt,
-};
-
-static int __init mtdoops_console_init(void)
+static int __init mtdoops_init(void)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
+	int mtd_index;
+	char *endp;
 
+	if (strlen(mtddev) == 0) {
+		printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
+		return -EINVAL;
+	}
+	if ((record_size & 4095) != 0) {
+		printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
+		return -EINVAL;
+	}
+	if (record_size < 4096) {
+		printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
+		return -EINVAL;
+	}
+
+	/* Setup the MTD device to use */
 	cxt->mtd_index = -1;
-	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
-	spin_lock_init(&cxt->writecount_lock);
+	mtd_index = simple_strtoul(mtddev, &endp, 0);
+	if (*endp == '\0')
+		cxt->mtd_index = mtd_index;
+	if (cxt->mtd_index > MAX_MTD_DEVICES) {
+		printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
+		       mtd_index);
+		return -EINVAL;
+	}
 
+	cxt->oops_buf = vmalloc(record_size);
 	if (!cxt->oops_buf) {
-		printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
+		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
 		return -ENOMEM;
 	}
+	memset(cxt->oops_buf, 0xff, record_size);
 
 	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
 	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
 
-	register_console(&mtdoops_console);
 	register_mtd_user(&mtdoops_notifier);
 	return 0;
 }
 
-static void __exit mtdoops_console_exit(void)
+static void __exit mtdoops_exit(void)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
 
 	unregister_mtd_user(&mtdoops_notifier);
-	unregister_console(&mtdoops_console);
-	kfree(cxt->name);
 	vfree(cxt->oops_buf);
+	vfree(cxt->oops_page_used);
 }
 
 
-subsys_initcall(mtdoops_console_init);
-module_exit(mtdoops_console_exit);
+module_init(mtdoops_init);
+module_exit(mtdoops_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
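
For context, the kmsg_dumper interface this patch switches to comes down to filling in a struct kmsg_dumper and registering it. Below is a minimal, self-contained sketch of the same pattern, matching the callback signature and the kmsg_dump_register()/kmsg_dump_unregister() calls visible in the diff; the module and function names here are hypothetical and not part of the patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>

/* The kernel log is handed to the dumper as two segments (s1/l1 and
 * s2/l2) because the log ring buffer may have wrapped; mtdoops_do_dump()
 * above concatenates the tails of both segments into its record buffer. */
static void example_do_dump(struct kmsg_dumper *dumper,
		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
		const char *s2, unsigned long l2)
{
	if (reason != KMSG_DUMP_PANIC && reason != KMSG_DUMP_OOPS)
		return;
	/* A real dumper would copy s1/s2 to persistent storage here,
	 * the way mtdoops copies them into cxt->oops_buf. */
	printk(KERN_INFO "example: dump reason %d, %lu + %lu bytes\n",
	       reason, l1, l2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_do_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");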