diff options
| author | Simon Kagstrom <simon.kagstrom@netinsight.net> | 2009-10-29 08:41:11 -0400 |
|---|---|---|
| committer | David Woodhouse <David.Woodhouse@intel.com> | 2009-11-30 07:01:59 -0500 |
| commit | be95745f01677245a061a8f51473ef5ec8ad008e (patch) | |
| tree | 404004e0036b4e558aa47145e8c4b57b2079a343 /drivers/mtd | |
| parent | a15b124fc4f15b2c4fc51669c936a30ce179d1f7 (diff) | |
mtd: mtdoops: keep track of used/unused pages in an array
This patch makes mtdoops keep track of used/unused pages in an array
instead of scanning the flash after a write. The advantage with this
approach is that it avoids calling mtd->read on a panic, which is not
possible for all mtd drivers.
Signed-off-by: Simon Kagstrom <simon.kagstrom@netinsight.net>
Reviewed-by: Anders Grafstrom <anders.grafstrom@netinsight.net>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd')
| -rw-r--r-- | drivers/mtd/mtdoops.c | 62 |
1 file changed, 44 insertions(+), 18 deletions(-)
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index c383add060d8..06c538249455 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
| @@ -44,6 +44,7 @@ static struct mtdoops_context { | |||
| 44 | int oops_pages; | 44 | int oops_pages; |
| 45 | int nextpage; | 45 | int nextpage; |
| 46 | int nextcount; | 46 | int nextcount; |
| 47 | unsigned long *oops_page_used; | ||
| 47 | char *name; | 48 | char *name; |
| 48 | 49 | ||
| 49 | void *oops_buf; | 50 | void *oops_buf; |
| @@ -54,18 +55,38 @@ static struct mtdoops_context { | |||
| 54 | int writecount; | 55 | int writecount; |
| 55 | } oops_cxt; | 56 | } oops_cxt; |
| 56 | 57 | ||
| 58 | static void mark_page_used(struct mtdoops_context *cxt, int page) | ||
| 59 | { | ||
| 60 | set_bit(page, cxt->oops_page_used); | ||
| 61 | } | ||
| 62 | |||
| 63 | static void mark_page_unused(struct mtdoops_context *cxt, int page) | ||
| 64 | { | ||
| 65 | clear_bit(page, cxt->oops_page_used); | ||
| 66 | } | ||
| 67 | |||
| 68 | static int page_is_used(struct mtdoops_context *cxt, int page) | ||
| 69 | { | ||
| 70 | return test_bit(page, cxt->oops_page_used); | ||
| 71 | } | ||
| 72 | |||
| 57 | static void mtdoops_erase_callback(struct erase_info *done) | 73 | static void mtdoops_erase_callback(struct erase_info *done) |
| 58 | { | 74 | { |
| 59 | wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; | 75 | wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; |
| 60 | wake_up(wait_q); | 76 | wake_up(wait_q); |
| 61 | } | 77 | } |
| 62 | 78 | ||
| 63 | static int mtdoops_erase_block(struct mtd_info *mtd, int offset) | 79 | static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) |
| 64 | { | 80 | { |
| 81 | struct mtd_info *mtd = cxt->mtd; | ||
| 82 | u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize; | ||
| 83 | u32 start_page = start_page_offset / OOPS_PAGE_SIZE; | ||
| 84 | u32 erase_pages = mtd->erasesize / OOPS_PAGE_SIZE; | ||
| 65 | struct erase_info erase; | 85 | struct erase_info erase; |
| 66 | DECLARE_WAITQUEUE(wait, current); | 86 | DECLARE_WAITQUEUE(wait, current); |
| 67 | wait_queue_head_t wait_q; | 87 | wait_queue_head_t wait_q; |
| 68 | int ret; | 88 | int ret; |
| 89 | int page; | ||
| 69 | 90 | ||
| 70 | init_waitqueue_head(&wait_q); | 91 | init_waitqueue_head(&wait_q); |
| 71 | erase.mtd = mtd; | 92 | erase.mtd = mtd; |
| @@ -90,16 +111,15 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset) | |||
| 90 | schedule(); /* Wait for erase to finish. */ | 111 | schedule(); /* Wait for erase to finish. */ |
| 91 | remove_wait_queue(&wait_q, &wait); | 112 | remove_wait_queue(&wait_q, &wait); |
| 92 | 113 | ||
| 114 | /* Mark pages as unused */ | ||
| 115 | for (page = start_page; page < start_page + erase_pages; page++) | ||
| 116 | mark_page_unused(cxt, page); | ||
| 117 | |||
| 93 | return 0; | 118 | return 0; |
| 94 | } | 119 | } |
| 95 | 120 | ||
| 96 | static void mtdoops_inc_counter(struct mtdoops_context *cxt) | 121 | static void mtdoops_inc_counter(struct mtdoops_context *cxt) |
| 97 | { | 122 | { |
| 98 | struct mtd_info *mtd = cxt->mtd; | ||
| 99 | size_t retlen; | ||
| 100 | u32 count; | ||
| 101 | int ret; | ||
| 102 | |||
| 103 | cxt->nextpage++; | 123 | cxt->nextpage++; |
| 104 | if (cxt->nextpage >= cxt->oops_pages) | 124 | if (cxt->nextpage >= cxt->oops_pages) |
| 105 | cxt->nextpage = 0; | 125 | cxt->nextpage = 0; |
| @@ -107,17 +127,7 @@ static void mtdoops_inc_counter(struct mtdoops_context *cxt) | |||
| 107 | if (cxt->nextcount == 0xffffffff) | 127 | if (cxt->nextcount == 0xffffffff) |
| 108 | cxt->nextcount = 0; | 128 | cxt->nextcount = 0; |
| 109 | 129 | ||
| 110 | ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, | 130 | if (page_is_used(cxt, cxt->nextpage)) { |
| 111 | &retlen, (u_char *) &count); | ||
| 112 | if (retlen != 4 || (ret < 0 && ret != -EUCLEAN)) { | ||
| 113 | printk(KERN_ERR "mtdoops: read failure at %d (%td of 4 read), err %d\n", | ||
| 114 | cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret); | ||
| 115 | schedule_work(&cxt->work_erase); | ||
| 116 | return; | ||
| 117 | } | ||
| 118 | |||
| 119 | /* See if we need to erase the next block */ | ||
| 120 | if (count != 0xffffffff) { | ||
| 121 | schedule_work(&cxt->work_erase); | 131 | schedule_work(&cxt->work_erase); |
| 122 | return; | 132 | return; |
| 123 | } | 133 | } |
| @@ -168,7 +178,7 @@ badblock: | |||
| 168 | } | 178 | } |
| 169 | 179 | ||
| 170 | for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) | 180 | for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) |
| 171 | ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); | 181 | ret = mtdoops_erase_block(cxt, cxt->nextpage * OOPS_PAGE_SIZE); |
| 172 | 182 | ||
| 173 | if (ret >= 0) { | 183 | if (ret >= 0) { |
| 174 | printk(KERN_DEBUG "mtdoops: ready %d, %d\n", | 184 | printk(KERN_DEBUG "mtdoops: ready %d, %d\n", |
| @@ -209,6 +219,7 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic) | |||
| 209 | if (retlen != OOPS_PAGE_SIZE || ret < 0) | 219 | if (retlen != OOPS_PAGE_SIZE || ret < 0) |
| 210 | printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n", | 220 | printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n", |
| 211 | cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); | 221 | cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); |
| 222 | mark_page_used(cxt, cxt->nextpage); | ||
| 212 | 223 | ||
| 213 | mtdoops_inc_counter(cxt); | 224 | mtdoops_inc_counter(cxt); |
| 214 | } | 225 | } |
| @@ -230,6 +241,8 @@ static void find_next_position(struct mtdoops_context *cxt) | |||
| 230 | size_t retlen; | 241 | size_t retlen; |
| 231 | 242 | ||
| 232 | for (page = 0; page < cxt->oops_pages; page++) { | 243 | for (page = 0; page < cxt->oops_pages; page++) { |
| 244 | /* Assume the page is used */ | ||
| 245 | mark_page_used(cxt, page); | ||
| 233 | ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); | 246 | ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]); |
| 234 | if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) { | 247 | if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) { |
| 235 | printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n", | 248 | printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n", |
| @@ -237,6 +250,8 @@ static void find_next_position(struct mtdoops_context *cxt) | |||
| 237 | continue; | 250 | continue; |
| 238 | } | 251 | } |
| 239 | 252 | ||
| 253 | if (count[0] == 0xffffffff && count[1] == 0xffffffff) | ||
| 254 | mark_page_unused(cxt, page); | ||
| 240 | if (count[1] != MTDOOPS_KERNMSG_MAGIC) | 255 | if (count[1] != MTDOOPS_KERNMSG_MAGIC) |
| 241 | continue; | 256 | continue; |
| 242 | if (count[0] == 0xffffffff) | 257 | if (count[0] == 0xffffffff) |
| @@ -273,6 +288,9 @@ static void find_next_position(struct mtdoops_context *cxt) | |||
| 273 | static void mtdoops_notify_add(struct mtd_info *mtd) | 288 | static void mtdoops_notify_add(struct mtd_info *mtd) |
| 274 | { | 289 | { |
| 275 | struct mtdoops_context *cxt = &oops_cxt; | 290 | struct mtdoops_context *cxt = &oops_cxt; |
| 291 | u64 mtdoops_pages = mtd->size; | ||
| 292 | |||
| 293 | do_div(mtdoops_pages, OOPS_PAGE_SIZE); | ||
| 276 | 294 | ||
| 277 | if (cxt->name && !strcmp(mtd->name, cxt->name)) | 295 | if (cxt->name && !strcmp(mtd->name, cxt->name)) |
| 278 | cxt->mtd_index = mtd->index; | 296 | cxt->mtd_index = mtd->index; |
| @@ -292,6 +310,13 @@ static void mtdoops_notify_add(struct mtd_info *mtd) | |||
| 292 | return; | 310 | return; |
| 293 | } | 311 | } |
| 294 | 312 | ||
| 313 | /* oops_page_used is a bit field */ | ||
| 314 | cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages, | ||
| 315 | BITS_PER_LONG)); | ||
| 316 | if (!cxt->oops_page_used) { | ||
| 317 | printk(KERN_ERR "Could not allocate page array\n"); | ||
| 318 | return; | ||
| 319 | } | ||
| 295 | cxt->mtd = mtd; | 320 | cxt->mtd = mtd; |
| 296 | if (mtd->size > INT_MAX) | 321 | if (mtd->size > INT_MAX) |
| 297 | cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; | 322 | cxt->oops_pages = INT_MAX / OOPS_PAGE_SIZE; |
| @@ -444,6 +469,7 @@ static void __exit mtdoops_console_exit(void) | |||
| 444 | unregister_console(&mtdoops_console); | 469 | unregister_console(&mtdoops_console); |
| 445 | kfree(cxt->name); | 470 | kfree(cxt->name); |
| 446 | vfree(cxt->oops_buf); | 471 | vfree(cxt->oops_buf); |
| 472 | vfree(cxt->oops_page_used); | ||
| 447 | } | 473 | } |
| 448 | 474 | ||
| 449 | 475 | ||
