Diffstat (limited to 'drivers/mtd/mtdoops.c')
 -rw-r--r--  drivers/mtd/mtdoops.c  74
 1 file changed, 43 insertions(+), 31 deletions(-)
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index b016eee18657..64772dc0ea2b 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -37,7 +37,11 @@
 #define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)
 
 #define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
-#define OOPS_PAGE_SIZE 4096
+
+static unsigned long record_size = 4096;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+		"record size for MTD OOPS pages in bytes (default 4096)");
 
 static struct mtdoops_context {
 	int mtd_index;
@@ -83,8 +87,8 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
 {
 	struct mtd_info *mtd = cxt->mtd;
 	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
-	u32 start_page = start_page_offset / OOPS_PAGE_SIZE;
-	u32 erase_pages = mtd->erasesize / OOPS_PAGE_SIZE;
+	u32 start_page = start_page_offset / record_size;
+	u32 erase_pages = mtd->erasesize / record_size;
 	struct erase_info erase;
 	DECLARE_WAITQUEUE(wait, current);
 	wait_queue_head_t wait_q;
@@ -152,15 +156,15 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
 	if (!mtd)
 		return;
 
-	mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
+	mod = (cxt->nextpage * record_size) % mtd->erasesize;
 	if (mod != 0) {
-		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
+		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
 		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
 	}
 
 	while (mtd->block_isbad) {
-		ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
 		if (!ret)
 			break;
 		if (ret < 0) {
@@ -168,20 +172,20 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
 			return;
 		}
 badblock:
-		printk(KERN_WARNING "mtdoops: bad block at %08x\n",
-		       cxt->nextpage * OOPS_PAGE_SIZE);
+		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
+		       cxt->nextpage * record_size);
 		i++;
-		cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
+		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
 		if (cxt->nextpage >= cxt->oops_pages)
 			cxt->nextpage = 0;
-		if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) {
+		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
 			printk(KERN_ERR "mtdoops: all blocks bad!\n");
 			return;
 		}
 	}
 
 	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
-		ret = mtdoops_erase_block(cxt, cxt->nextpage * OOPS_PAGE_SIZE);
+		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);
 
 	if (ret >= 0) {
 		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
@@ -191,7 +195,7 @@ badblock:
 	}
 
 	if (mtd->block_markbad && ret == -EIO) {
-		ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+		ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
 		if (ret < 0) {
 			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
 			return;
@@ -206,22 +210,22 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
 	size_t retlen;
 	int ret;
 
-	if (cxt->writecount < OOPS_PAGE_SIZE)
+	if (cxt->writecount < record_size)
 		memset(cxt->oops_buf + cxt->writecount, 0xff,
-				OOPS_PAGE_SIZE - cxt->writecount);
+				record_size - cxt->writecount);
 
 	if (panic)
-		ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+		ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
+					record_size, &retlen, cxt->oops_buf);
 	else
-		ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-					OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+		ret = mtd->write(mtd, cxt->nextpage * record_size,
+					record_size, &retlen, cxt->oops_buf);
 
 	cxt->writecount = 0;
 
-	if (retlen != OOPS_PAGE_SIZE || ret < 0)
-		printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n",
-		       cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+	if (retlen != record_size || ret < 0)
+		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
+		       cxt->nextpage * record_size, retlen, record_size, ret);
 	mark_page_used(cxt, cxt->nextpage);
 
 	mtdoops_inc_counter(cxt);
@@ -246,10 +250,10 @@ static void find_next_position(struct mtdoops_context *cxt)
 	for (page = 0; page < cxt->oops_pages; page++) {
 		/* Assume the page is used */
 		mark_page_used(cxt, page);
-		ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
+		ret = mtd->read(mtd, page * record_size, 8, &retlen, (u_char *) &count[0]);
 		if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
-			printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n",
-			       page * OOPS_PAGE_SIZE, retlen, ret);
+			printk(KERN_ERR "mtdoops: read failure at %ld (%td of 8 read), err %d\n",
+			       page * record_size, retlen, ret);
 			continue;
 		}
 
@@ -293,7 +297,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 	struct mtdoops_context *cxt = &oops_cxt;
 	u64 mtdoops_pages = mtd->size;
 
-	do_div(mtdoops_pages, OOPS_PAGE_SIZE);
+	do_div(mtdoops_pages, record_size);
 
 	if (cxt->name && !strcmp(mtd->name, cxt->name))
 		cxt->mtd_index = mtd->index;
@@ -307,7 +311,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 		return;
 	}
 
-	if (mtd->erasesize < OOPS_PAGE_SIZE) {
+	if (mtd->erasesize < record_size) {
 		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
 		       mtd->index);
 		return;
@@ -328,7 +332,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
 	}
 
 	cxt->mtd = mtd;
-	cxt->oops_pages = (int)mtd->size / OOPS_PAGE_SIZE;
+	cxt->oops_pages = (int)mtd->size / record_size;
 	find_next_position(cxt);
 	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
 }
@@ -403,15 +407,15 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 		cxt->writecount = 8;
 	}
 
-	if (count + cxt->writecount > OOPS_PAGE_SIZE)
-		count = OOPS_PAGE_SIZE - cxt->writecount;
+	if (count + cxt->writecount > record_size)
+		count = record_size - cxt->writecount;
 
 	memcpy(cxt->oops_buf + cxt->writecount, s, count);
 	cxt->writecount += count;
 
 	spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 
-	if (cxt->writecount == OOPS_PAGE_SIZE)
+	if (cxt->writecount == record_size)
 		mtdoops_console_sync();
 }
 
@@ -450,8 +454,16 @@ static int __init mtdoops_console_init(void)
 {
 	struct mtdoops_context *cxt = &oops_cxt;
 
+	if ((record_size & 4095) != 0) {
+		printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
+		return -EINVAL;
+	}
+	if (record_size < 4096) {
+		printk(KERN_ERR "mtdoops: record_size must be over 4096 bytes\n");
+		return -EINVAL;
+	}
 	cxt->mtd_index = -1;
-	cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
+	cxt->oops_buf = vmalloc(record_size);
 	if (!cxt->oops_buf) {
 		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
 		return -ENOMEM;
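
For illustration only: a small standalone user-space C sketch (not part of the patch) of the record/erase-block alignment arithmetic that mtdoops_workfunc_erase() performs once the fixed OOPS_PAGE_SIZE is replaced by the record_size parameter. The record_size, erasesize, and partition-size values below are hypothetical.

/*
 * Sketch of the patched driver's alignment step: oops records are
 * record_size bytes long, and the next record index is rounded up to
 * an erase-block boundary (wrapping at the end of the partition)
 * before the block is erased.  All values are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long record_size = 8192;	/* hypothetical record_size= module parameter */
	unsigned long erasesize = 128 * 1024;	/* hypothetical NAND erase-block size */
	unsigned long oops_pages = (4UL * 1024 * 1024) / record_size;	/* 4 MiB partition */
	unsigned long nextpage = 37;		/* arbitrary current record index */

	/* Same computation as in the patched mtdoops_workfunc_erase(). */
	unsigned long mod = (nextpage * record_size) % erasesize;
	if (mod != 0)
		nextpage = nextpage + ((erasesize - mod) / record_size);
	if (nextpage >= oops_pages)
		nextpage = 0;

	printf("next record index after alignment: %lu\n", nextpage);	/* prints 48 */
	return 0;
}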