path: root/drivers/mtd/sm_ftl.c
author	Maxim Levitsky <maximlevitsky@gmail.com>	2010-02-22 13:39:41 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2010-02-26 14:06:50 -0500
commit	7d17c02a01a111f40986859f044c8c4cce8a4aa6 (patch)
tree	b9fec0a8ad3073e3d7ca442fc1db17b054974e66 /drivers/mtd/sm_ftl.c
parent	a7790532f5b7358c33a6b1834dc2b318de209f31 (diff)
mtd: Add new SmartMedia/xD FTL
This implements a new read/write SmartMedia/xD FTL. The underlying mtd driver must support proper ECC and bad-block verification based on the OOB area of 512-byte NAND pages. The mtd driver must also define read_oob and write_oob, which are used to read and write data and OOB together.

Signed-off-by: Maxim Levitsky <maximlevitsky@gmail.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
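Because every 512-byte sector travels together with its spare (OOB) area, the FTL drives the chip exclusively through the mtd read_oob/write_oob hooks. A minimal sketch of that call pattern, mirroring what sm_read_sector() in the file below does (mtd and offset are assumed to be set up already; the local names are illustrative only):

	struct mtd_oob_ops ops;
	struct sm_oob oob;		/* spare area contents */
	uint8_t data[SM_SECTOR_SIZE];	/* 512-byte sector payload */
	int ret;

	ops.mode = MTD_OOB_PLACE;	/* MTD_OOB_RAW for small-page NAND */
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)&oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = data;

	/* a single call transfers the sector and its spare area together */
	ret = mtd->read_oob(mtd, offset, &ops);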
Diffstat (limited to 'drivers/mtd/sm_ftl.c')
-rw-r--r--	drivers/mtd/sm_ftl.c	1284
1 file changed, 1284 insertions, 0 deletions
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
new file mode 100644
index 000000000000..a59ebb48cae1
--- /dev/null
+++ b/drivers/mtd/sm_ftl.c
@@ -0,0 +1,1284 @@
1/*
2 * Copyright © 2009 - Maxim Levitsky
3 * SmartMedia/xD translation layer
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/random.h>
13#include <linux/hdreg.h>
14#include <linux/kthread.h>
15#include <linux/freezer.h>
16#include <linux/sysfs.h>
17#include <linux/bitops.h>
18#include "nand/sm_common.h"
19#include "sm_ftl.h"
20
21#ifdef CONFIG_SM_FTL_MUSEUM
22#include <linux/mtd/nand_ecc.h>
23#endif
24
25
26struct workqueue_struct *cache_flush_workqueue;
27
28static int cache_timeout = 1000;
29module_param(cache_timeout, int, S_IRUGO);
30MODULE_PARM_DESC(cache_timeout,
31 "Timeout (in ms) for cache flush (1000 ms default)");
32
33static int debug;
34module_param(debug, int, S_IRUGO | S_IWUSR);
35MODULE_PARM_DESC(debug, "Debug level (0-2)");
36
37
38/* ------------------- sysfs attributes ----------------------------------- */
39struct sm_sysfs_attribute {
40 struct device_attribute dev_attr;
41 char *data;
42 int len;
43};
44
45ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
46 char *buf)
47{
48 struct sm_sysfs_attribute *sm_attr =
49 container_of(attr, struct sm_sysfs_attribute, dev_attr);
50
51 strncpy(buf, sm_attr->data, sm_attr->len);
52 return sm_attr->len;
53}
54
55
56#define NUM_ATTRIBUTES 1
57#define SM_CIS_VENDOR_OFFSET 0x59
58struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
59{
60 struct attribute_group *attr_group;
61 struct attribute **attributes;
62 struct sm_sysfs_attribute *vendor_attribute;
63
64 int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
65 SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
66
67 char *vendor = kmalloc(vendor_len + 1, GFP_KERNEL);
68 memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
69 vendor[vendor_len] = 0;
70
71 /* Initialize sysfs attributes */
72 vendor_attribute =
73 kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
74
75 vendor_attribute->data = vendor;
76 vendor_attribute->len = vendor_len;
77 vendor_attribute->dev_attr.attr.name = "vendor";
78 vendor_attribute->dev_attr.attr.mode = S_IRUGO;
79 vendor_attribute->dev_attr.show = sm_attr_show;
80
81
82 /* Create array of pointers to the attributes */
83 attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
84 GFP_KERNEL);
85 attributes[0] = &vendor_attribute->dev_attr.attr;
86
87 /* Finally create the attribute group */
88 attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
89 attr_group->attrs = attributes;
90 return attr_group;
91}
92
93void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
94{
95 struct attribute **attributes = ftl->disk_attributes->attrs;
96 int i;
97
98 for (i = 0; attributes[i] ; i++) {
99
100 struct device_attribute *dev_attr = container_of(attributes[i],
101 struct device_attribute, attr);
102
103 struct sm_sysfs_attribute *sm_attr =
104 container_of(dev_attr,
105 struct sm_sysfs_attribute, dev_attr);
106
107 kfree(sm_attr->data);
108 kfree(sm_attr);
109 }
110
111 kfree(ftl->disk_attributes->attrs);
112 kfree(ftl->disk_attributes);
113}
114
115
116/* ----------------------- oob helpers -------------------------------------- */
117
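/*
 * SmartMedia keeps the logical block address twice in the spare area
 * (lba_copy1/lba_copy2).  Each copy is two bytes:
 *   byte 0: 0 0 0 1 0 a9 a8 a7    (fixed 00010 pattern + LBA bits 9..7)
 *   byte 1: a6 a5 a4 a3 a2 a1 a0 p (LBA bits 6..0 + parity bit)
 * where p is chosen so that the whole 16-bit value has an even number
 * of set bits.  sm_get_lba() checks the fixed bits and the parity and
 * extracts the 10-bit LBA; sm_write_lba() builds the same encoding.
 */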
118static int sm_get_lba(uint8_t *lba)
119{
120 /* check fixed bits */
121 if ((lba[0] & 0xF8) != 0x10)
122 return -2;
123
124 /* check parity - endianness doesn't matter */
125 if (hweight16(*(uint16_t *)lba) & 1)
126 return -2;
127
128 return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
129}
130
131
132/*
133 * Read the LBA associated with a block
134 * returns -1 if the block is erased
135 * returns -2 if an error happens
136 */
137static int sm_read_lba(struct sm_oob *oob)
138{
139 static const uint32_t erased_pattern[4] = {
140 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
141
142 uint16_t lba_test;
143 int lba;
144
145 /* First test for erased block */
146 if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
147 return -1;
148
149 /* Now check whether the two copies of the LBA differ by more than one bit */
150 lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
151 if (lba_test && !is_power_of_2(lba_test))
152 return -2;
153
154 /* And read it */
155 lba = sm_get_lba(oob->lba_copy1);
156
157 if (lba == -2)
158 lba = sm_get_lba(oob->lba_copy2);
159
160 return lba;
161}
162
163static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
164{
165 uint8_t tmp[2];
166
167 WARN_ON(lba >= 1000);
168
169 tmp[0] = 0x10 | ((lba >> 7) & 0x07);
170 tmp[1] = (lba << 1) & 0xFF;
171
172 if (hweight16(*(uint16_t *)tmp) & 0x01)
173 tmp[1] |= 1;
174
175 oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
176 oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
177}
178
179
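/*
 * Two address spaces are used below: sm_mkoffset() turns a physical
 * (zone, block, boffset) triple into an absolute mtd offset, with every
 * zone occupying SM_MAX_ZONE_SIZE blocks on the media, while
 * sm_break_offset() splits a logical byte offset (sector number << 9)
 * into (zone, lba, boffset), because each zone exposes only max_lba
 * logical blocks to the block layer.
 */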
180/* Make offset from parts */
181static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
182{
183 WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
184 WARN_ON(zone < 0 || zone >= ftl->zone_count);
185 WARN_ON(block >= ftl->zone_size);
186 WARN_ON(boffset >= ftl->block_size);
187
188 if (block == -1)
189 return -1;
190
191 return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
192}
193
194/* Breaks offset into parts */
195static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
196 int *zone, int *block, int *boffset)
197{
198 *boffset = do_div(offset, ftl->block_size);
199 *block = do_div(offset, ftl->max_lba);
200 *zone = offset >= ftl->zone_count ? -1 : offset;
201}
202
203/* ---------------------- low level IO ------------------------------------- */
204
205static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
206{
207#ifdef CONFIG_SM_FTL_MUSEUM
208 uint8_t ecc[3];
209
210 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
211 if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
212 return -EIO;
213
214 buffer += SM_SMALL_PAGE;
215
216 __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
217 if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
218 return -EIO;
219#endif
220 return 0;
221}
222
223/* Reads a sector + oob*/
224static int sm_read_sector(struct sm_ftl *ftl,
225 int zone, int block, int boffset,
226 uint8_t *buffer, struct sm_oob *oob)
227{
228 struct mtd_info *mtd = ftl->trans->mtd;
229 struct mtd_oob_ops ops;
230 struct sm_oob tmp_oob;
231 int ret;
232 int try = 0;
233
234 /* The FTL table can contain -1 entries; such unmapped blocks read back as all 0xFF */
235 if (block == -1) {
236 memset(buffer, 0xFF, SM_SECTOR_SIZE);
237 return 0;
238 }
239
240 /* User might not need the oob, but we do for data verification */
241 if (!oob)
242 oob = &tmp_oob;
243
244 ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
245 ops.ooboffs = 0;
246 ops.ooblen = SM_OOB_SIZE;
247 ops.oobbuf = (void *)oob;
248 ops.len = SM_SECTOR_SIZE;
249 ops.datbuf = buffer;
250
251again:
252 if (try++) {
253 /* Avoid infinite recursion on CIS reads, sm_recheck_media
254 won't help anyway */
255 if (zone == 0 && block == ftl->cis_block && boffset ==
256 ftl->cis_boffset)
257 return ret;
258
259 /* Test if media is stable */
260 if (try == 3 || sm_recheck_media(ftl))
261 return ret;
262 }
263
264 /* Unfortunately, the oob read will _always_ succeed,
265 despite card removal..... */
266 ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
267
268 /* Test for unknown errors */
269 if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
270 dbg("read of block %d at zone %d, failed due to error (%d)",
271 block, zone, ret);
272 goto again;
273 }
274
275 /* Do a basic test on the oob, to guard against returned garbage */
276 if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
277 goto again;
278
279 /* This should never happen, unless there is a bug in the mtd driver */
280 WARN_ON(ops.oobretlen != SM_OOB_SIZE);
281 WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
282
283 if (!buffer)
284 return 0;
285
286 /* Test if sector marked as bad */
287 if (!sm_sector_valid(oob)) {
288 dbg("read of block %d at zone %d, failed because it is marked"
289 " as bad" , block, zone);
290 goto again;
291 }
292
293 /* Test ECC*/
294 if (ret == -EBADMSG ||
295 (ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
296
297 dbg("read of block %d at zone %d, failed due to ECC error",
298 block, zone);
299 goto again;
300 }
301
302 return 0;
303}
304
305/* Writes a sector to media */
306static int sm_write_sector(struct sm_ftl *ftl,
307 int zone, int block, int boffset,
308 uint8_t *buffer, struct sm_oob *oob)
309{
310 struct mtd_oob_ops ops;
311 struct mtd_info *mtd = ftl->trans->mtd;
312 int ret;
313
314 BUG_ON(ftl->readonly);
315
316 if (zone == 0 && (block == ftl->cis_block || block == 0)) {
317 dbg("attempted to write the CIS!");
318 return -EIO;
319 }
320
321 if (ftl->unstable)
322 return -EIO;
323
324 ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
325 ops.len = SM_SECTOR_SIZE;
326 ops.datbuf = buffer;
327 ops.ooboffs = 0;
328 ops.ooblen = SM_OOB_SIZE;
329 ops.oobbuf = (void *)oob;
330
331 ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
332
333 /* Now we assume that hardware will catch write bitflip errors */
334 /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
335
336 if (ret) {
337 dbg("write to block %d at zone %d, failed with error %d",
338 block, zone, ret);
339
340 sm_recheck_media(ftl);
341 return ret;
342 }
343
344 /* This should never happen, unless there is a bug in the driver */
345 WARN_ON(ops.oobretlen != SM_OOB_SIZE);
346 WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
347
348 return 0;
349}
350
351/* ------------------------ block IO ------------------------------------- */
352
353/* Write a block using data and lba, and invalid sector bitmap */
354static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
355 int zone, int block, int lba,
356 unsigned long invalid_bitmap)
357{
358 struct sm_oob oob;
359 int boffset;
360 int retry = 0;
361
362 /* Initialize the oob with requested values */
363 memset(&oob, 0xFF, SM_OOB_SIZE);
364 sm_write_lba(&oob, lba);
365restart:
366 if (ftl->unstable)
367 return -EIO;
368
369 for (boffset = 0; boffset < ftl->block_size;
370 boffset += SM_SECTOR_SIZE) {
371
372 oob.data_status = 0xFF;
373
374 if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
375
376 sm_printk("sector %d of block at LBA %d of zone %d"
377 " couldn't be read, marking it as invalid",
378 boffset / SM_SECTOR_SIZE, lba, zone);
379
380 oob.data_status = 0;
381 }
382
383#ifdef CONFIG_SM_FTL_MUSEUM
384 if (ftl->smallpagenand) {
385 __nand_calculate_ecc(buf + boffset,
386 SM_SMALL_PAGE, oob.ecc1);
387
388 __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
389 SM_SMALL_PAGE, oob.ecc2);
390 }
391#endif
392 if (!sm_write_sector(ftl, zone, block, boffset,
393 buf + boffset, &oob))
394 continue;
395
396 if (!retry) {
397
398 /* If the write fails, try to erase the block */
399 /* This is safe, because we never write to blocks
400 that contain valuable data.
401 This is intended to repair blocks that are marked
402 as erased but aren't fully erased */
403
404 if (sm_erase_block(ftl, zone, block, 0))
405 return -EIO;
406
407 retry = 1;
408 goto restart;
409 } else {
410 sm_mark_block_bad(ftl, zone, block);
411 return -EIO;
412 }
413 }
414 return 0;
415}
416
417
418/* Mark the whole block as bad. */
419static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
420{
421 struct sm_oob oob;
422 int boffset;
423
424 memset(&oob, 0xFF, SM_OOB_SIZE);
425 oob.block_status = 0xF0;
426
427 if (ftl->unstable)
428 return;
429
430 if (sm_recheck_media(ftl))
431 return;
432
433 sm_printk("marking block %d of zone %d as bad", block, zone);
434
435 /* We aren't checking the return value, because we don't care */
436 /* This also fails on fake xD cards, but I guess these won't expose
437 any bad blocks until they fail completely */
438 for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
439 sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
440}
441
442/*
443 * Erase a block within a zone
444 * If the erase succeeds, it updates the free block fifo, otherwise it marks the block as bad
445 */
446static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
447 int put_free)
448{
449 struct ftl_zone *zone = &ftl->zones[zone_num];
450 struct mtd_info *mtd = ftl->trans->mtd;
451 struct erase_info erase;
452
453 erase.mtd = mtd;
454 erase.callback = sm_erase_callback;
455 erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
456 erase.len = ftl->block_size;
457 erase.priv = (u_long)ftl;
458
459 if (ftl->unstable)
460 return -EIO;
461
462 BUG_ON(ftl->readonly);
463
464 if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
465 sm_printk("attempted to erase the CIS!");
466 return -EIO;
467 }
468
469 if (mtd->erase(mtd, &erase)) {
470 sm_printk("erase of block %d in zone %d failed",
471 block, zone_num);
472 goto error;
473 }
474
475 if (erase.state == MTD_ERASE_PENDING)
476 wait_for_completion(&ftl->erase_completion);
477
478 if (erase.state != MTD_ERASE_DONE) {
479 sm_printk("erase of block %d in zone %d failed after wait",
480 block, zone_num);
481 goto error;
482 }
483
484 if (put_free)
485 kfifo_in(&zone->free_sectors,
486 (const unsigned char *)&block, sizeof(block));
487
488 return 0;
489error:
490 sm_mark_block_bad(ftl, zone_num, block);
491 return -EIO;
492}
493
494static void sm_erase_callback(struct erase_info *self)
495{
496 struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
497 complete(&ftl->erase_completion);
498}
499
500/* Thoroughly test that the block is valid. */
501static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
502{
503 int boffset;
504 struct sm_oob oob;
505 int lbas[] = { -3, 0, 0, 0 };
506 int i = 0;
507 int test_lba;
508
509
510 /* First just check that the block doesn't look fishy */
511 /* Only blocks that are valid or are sliced in two parts are
512 accepted */
513 for (boffset = 0; boffset < ftl->block_size;
514 boffset += SM_SECTOR_SIZE) {
515
516 /* This shouldn't happen anyway */
517 if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
518 return -2;
519
520 test_lba = sm_read_lba(&oob);
521
522 if (lbas[i] != test_lba)
523 lbas[++i] = test_lba;
524
525 /* If we found three different LBAs, something is fishy */
526 if (i == 3)
527 return -EIO;
528 }
529
530 /* If the block is sliced (usually partially erased), erase it */
531 if (i == 2) {
532 sm_erase_block(ftl, zone, block, 1);
533 return 1;
534 }
535
536 return 0;
537}
538
539/* ----------------- media scanning --------------------------------- */
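/* Each row is { medium size in MiB, cylinders, heads, sectors per track },
 * used to report a synthetic CHS geometry to the block layer (see sm_getgeo). */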
540static const struct chs_entry chs_table[] = {
541 { 1, 125, 4, 4 },
542 { 2, 125, 4, 8 },
543 { 4, 250, 4, 8 },
544 { 8, 250, 4, 16 },
545 { 16, 500, 4, 16 },
546 { 32, 500, 8, 16 },
547 { 64, 500, 8, 32 },
548 { 128, 500, 16, 32 },
549 { 256, 1000, 16, 32 },
550 { 512, 1015, 32, 63 },
551 { 1024, 985, 33, 63 },
552 { 2048, 985, 33, 63 },
553 { 0 },
554};
555
556
557static const uint8_t cis_signature[] = {
558 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
559};
560/* Find out media parameters.
561 * This ideally should be based on the NAND id, but for now the device size is enough */
562int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
563{
564 int i;
565 int size_in_megs = mtd->size / (1024 * 1024);
566
567 ftl->readonly = mtd->type == MTD_ROM;
568
569 /* Manual settings for very old devices */
570 ftl->zone_count = 1;
571 ftl->smallpagenand = 0;
572
573 switch (size_in_megs) {
574 case 1:
575 /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
576 ftl->zone_size = 256;
577 ftl->max_lba = 250;
578 ftl->block_size = 8 * SM_SECTOR_SIZE;
579 ftl->smallpagenand = 1;
580
581 break;
582 case 2:
583 /* 2 MiB flash SmartMedia (256 byte pages)*/
584 if (mtd->writesize == SM_SMALL_PAGE) {
585 ftl->zone_size = 512;
586 ftl->max_lba = 500;
587 ftl->block_size = 8 * SM_SECTOR_SIZE;
588 ftl->smallpagenand = 1;
589 /* 2 MiB rom SmartMedia */
590 } else {
591
592 if (!ftl->readonly)
593 return -ENODEV;
594
595 ftl->zone_size = 256;
596 ftl->max_lba = 250;
597 ftl->block_size = 16 * SM_SECTOR_SIZE;
598 }
599 break;
600 case 4:
601 /* 4 MiB flash/rom SmartMedia device */
602 ftl->zone_size = 512;
603 ftl->max_lba = 500;
604 ftl->block_size = 16 * SM_SECTOR_SIZE;
605 break;
606 case 8:
607 /* 8 MiB flash/rom SmartMedia device */
608 ftl->zone_size = 1024;
609 ftl->max_lba = 1000;
610 ftl->block_size = 16 * SM_SECTOR_SIZE;
611 }
612
613 /* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
614 sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
615 if (size_in_megs >= 16) {
616 ftl->zone_count = size_in_megs / 16;
617 ftl->zone_size = 1024;
618 ftl->max_lba = 1000;
619 ftl->block_size = 32 * SM_SECTOR_SIZE;
620 }
621
622 /* Test for proper write, erase and oob sizes */
623 if (mtd->erasesize > ftl->block_size)
624 return -ENODEV;
625
626 if (mtd->writesize > SM_SECTOR_SIZE)
627 return -ENODEV;
628
629 if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
630 return -ENODEV;
631
632 if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
633 return -ENODEV;
634
635 /* We use these functions for IO */
636 if (!mtd->read_oob || !mtd->write_oob)
637 return -ENODEV;
638
639 /* Find geometry information */
640 for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
641 if (chs_table[i].size == size_in_megs) {
642 ftl->cylinders = chs_table[i].cyl;
643 ftl->heads = chs_table[i].head;
644 ftl->sectors = chs_table[i].sec;
645 return 0;
646 }
647 }
648
649 sm_printk("media has unknown size: %d MiB", size_in_megs);
650 ftl->cylinders = 985;
651 ftl->heads = 33;
652 ftl->sectors = 63;
653 return 0;
654}
655
656/* Validate the CIS */
657static int sm_read_cis(struct sm_ftl *ftl)
658{
659 struct sm_oob oob;
660
661 if (sm_read_sector(ftl,
662 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
663 return -EIO;
664
665 if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
666 return -EIO;
667
668 if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
669 cis_signature, sizeof(cis_signature))) {
670 return 0;
671 }
672
673 return -EIO;
674}
675
676/* Scan the media for the CIS */
677static int sm_find_cis(struct sm_ftl *ftl)
678{
679 struct sm_oob oob;
680 int block, boffset;
681 int block_found = 0;
682 int cis_found = 0;
683
684 /* Search for first valid block */
685 for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
686
687 if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
688 continue;
689
690 if (!sm_block_valid(&oob))
691 continue;
692 block_found = 1;
693 break;
694 }
695
696 if (!block_found)
697 return -EIO;
698
699 /* Search for first valid sector in this block */
700 for (boffset = 0 ; boffset < ftl->block_size;
701 boffset += SM_SECTOR_SIZE) {
702
703 if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
704 continue;
705
706 if (!sm_sector_valid(&oob))
707 continue;
708 break;
709 }
710
711 if (boffset == ftl->block_size)
712 return -EIO;
713
714 ftl->cis_block = block;
715 ftl->cis_boffset = boffset;
716 ftl->cis_page_offset = 0;
717
718 cis_found = !sm_read_cis(ftl);
719
720 if (!cis_found) {
721 ftl->cis_page_offset = SM_SMALL_PAGE;
722 cis_found = !sm_read_cis(ftl);
723 }
724
725 if (cis_found) {
726 dbg("CIS block found at offset %x",
727 block * ftl->block_size +
728 boffset + ftl->cis_page_offset);
729 return 0;
730 }
731 return -EIO;
732}
733
734/* Basic test to determine if the underlying mtd device is functional */
735static int sm_recheck_media(struct sm_ftl *ftl)
736{
737 if (sm_read_cis(ftl)) {
738
739 if (!ftl->unstable) {
740 sm_printk("media unstable, not allowing writes");
741 ftl->unstable = 1;
742 }
743 return -EIO;
744 }
745 return 0;
746}
747
748/* Initialize an FTL zone */
749static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
750{
751 struct ftl_zone *zone = &ftl->zones[zone_num];
752 struct sm_oob oob;
753 uint16_t block;
754 int lba;
755 int i = 0;
756
757 dbg("initializing zone %d", zone_num);
758
759 /* Allocate memory for FTL table */
760 zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);
761
762 if (!zone->lba_to_phys_table)
763 return -ENOMEM;
764 memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
765
766
767 /* Allocate memory for free sectors FIFO */
768 if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
769 kfree(zone->lba_to_phys_table);
770 return -ENOMEM;
771 }
772
773 /* Now scan the zone */
774 for (block = 0 ; block < ftl->zone_size ; block++) {
775
776 /* Skip blocks up to and including the CIS */
777 if (zone_num == 0 && block <= ftl->cis_block)
778 continue;
779
780 /* Read the oob of first sector */
781 if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
782 return -EIO;
783
784 /* Test to see if block is erased. It is enough to test
785 first sector, because erase happens in one shot */
786 if (sm_block_erased(&oob)) {
787 kfifo_in(&zone->free_sectors,
788 (unsigned char *)&block, 2);
789 continue;
790 }
791
792 /* If block is marked as bad, skip it */
793 /* This assumes we can trust the first sector */
794 /* However, the way the block valid status is defined ensures
795 a very low probability of failure here */
796 if (!sm_block_valid(&oob)) {
797 dbg("PH %04d <-> <marked bad>", block);
798 continue;
799 }
800
801
802 lba = sm_read_lba(&oob);
803
804 /* Invalid LBA means that block is damaged. */
805 /* We could try to erase it, or mark it as bad, but
806 let's leave that to a recovery application */
807 if (lba == -2 || lba >= ftl->max_lba) {
808 dbg("PH %04d <-> LBA %04d(bad)", block, lba);
809 continue;
810 }
811
812
813 /* If there is no collision,
814 just put the sector in the FTL table */
815 if (zone->lba_to_phys_table[lba] < 0) {
816 dbg_verbose("PH %04d <-> LBA %04d", block, lba);
817 zone->lba_to_phys_table[lba] = block;
818 continue;
819 }
820
821 sm_printk("collision"
822 " of LBA %d between blocks %d and %d in zone %d",
823 lba, zone->lba_to_phys_table[lba], block, zone_num);
824
825 /* Test that this block is valid*/
826 if (sm_check_block(ftl, zone_num, block))
827 continue;
828
829 /* Test now the old block */
830 if (sm_check_block(ftl, zone_num,
831 zone->lba_to_phys_table[lba])) {
832 zone->lba_to_phys_table[lba] = block;
833 continue;
834 }
835
836 /* If both blocks are valid and share the same LBA, it means that
837 they hold different versions of the same data. It is not
838 known which is more recent, thus just erase one of them
839 */
840 sm_printk("both blocks are valid, erasing the latter");
841 sm_erase_block(ftl, zone_num, block, 1);
842 }
843
844 dbg("zone initialized");
845 zone->initialized = 1;
846
847 /* No free sectors means that the zone is heavily damaged; writes won't
848 work, but it can still be (partially) read */
849 if (!kfifo_len(&zone->free_sectors)) {
850 sm_printk("no free blocks in zone %d", zone_num);
851 return 0;
852 }
853
854 /* Randomize first block we write to */
855 get_random_bytes(&i, 2);
856 i %= (kfifo_len(&zone->free_sectors) / 2);
857
858 while (i--) {
859 kfifo_out(&zone->free_sectors, (unsigned char *)&block, 2);
860 kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
861 }
862 return 0;
863}
864
865/* Get and automatically initialize an FTL mapping for one zone */
866struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
867{
868 struct ftl_zone *zone;
869 int error;
870
871 BUG_ON(zone_num >= ftl->zone_count);
872 zone = &ftl->zones[zone_num];
873
874 if (!zone->initialized) {
875 error = sm_init_zone(ftl, zone_num);
876
877 if (error)
878 return ERR_PTR(error);
879 }
880 return zone;
881}
882
883
884/* ----------------- cache handling ------------------------------------------*/
885
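/*
 * Write-cache design: writes are accumulated in ftl->cache_data, which
 * shadows exactly one (zone, block) pair at a time.  Each bit of
 * cache_data_invalid_bitmap marks a sector that hasn't been written into
 * the cache yet.  On flush, those missing sectors are read back from the
 * old physical block, the whole block is written to a free block taken
 * from the zone's FIFO, the FTL table is updated, and the old block is
 * erased and returned to the FIFO.
 */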
886/* Initialize the one block cache */
887void sm_cache_init(struct sm_ftl *ftl)
888{
889 ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
890 ftl->cache_clean = 1;
891 ftl->cache_zone = -1;
892 ftl->cache_block = -1;
893 /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
894}
895
896/* Put sector in one block cache */
897void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
898{
899 memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
900 clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
901 ftl->cache_clean = 0;
902}
903
904/* Read a sector from the cache */
905int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
906{
907 if (test_bit(boffset / SM_SECTOR_SIZE,
908 &ftl->cache_data_invalid_bitmap))
909 return -1;
910
911 memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
912 return 0;
913}
914
915/* Write the cache to hardware */
916int sm_cache_flush(struct sm_ftl *ftl)
917{
918 struct ftl_zone *zone;
919
920 int sector_num;
921 uint16_t write_sector;
922 int zone_num = ftl->cache_zone;
923 int block_num;
924
925 if (ftl->cache_clean)
926 return 0;
927
928 if (ftl->unstable)
929 return -EIO;
930
931 BUG_ON(zone_num < 0);
932 zone = &ftl->zones[zone_num];
933 block_num = zone->lba_to_phys_table[ftl->cache_block];
934
935
936 /* Try to read all unread areas of the cache block*/
937 for_each_bit(sector_num, &ftl->cache_data_invalid_bitmap,
938 ftl->block_size / SM_SECTOR_SIZE) {
939
940 if (!sm_read_sector(ftl,
941 zone_num, block_num, sector_num * SM_SECTOR_SIZE,
942 ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
943 clear_bit(sector_num,
944 &ftl->cache_data_invalid_bitmap);
945 }
946restart:
947
948 if (ftl->unstable)
949 return -EIO;
950 /* No spare blocks */
951 /* We could still continue by erasing the current block,
952 but for such worn-out media it isn't worth the trouble
953 and the danger */
954
955 if (!kfifo_len(&zone->free_sectors)) {
956 dbg("no free sectors for write!");
957 return -EIO;
958 }
959
960 kfifo_out(&zone->free_sectors, (unsigned char *)&write_sector, 2);
961
962 if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
963 ftl->cache_block, ftl->cache_data_invalid_bitmap))
964 goto restart;
965
966 /* Update the FTL table */
967 zone->lba_to_phys_table[ftl->cache_block] = write_sector;
968
969 /* Write successful, so erase and free the old block */
970 if (block_num > 0)
971 sm_erase_block(ftl, zone_num, block_num, 1);
972
973 sm_cache_init(ftl);
974 return 0;
975}
976
977
978/* flush timer, runs cache_timeout ms after the last write */
979static void sm_cache_flush_timer(unsigned long data)
980{
981 struct sm_ftl *ftl = (struct sm_ftl *)data;
982 queue_work(cache_flush_workqueue, &ftl->flush_work);
983}
984
985/* cache flush work, kicked by timer */
986static void sm_cache_flush_work(struct work_struct *work)
987{
988 struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
989 mutex_lock(&ftl->mutex);
990 sm_cache_flush(ftl);
991 mutex_unlock(&ftl->mutex);
992 return;
993}
994
995/* ---------------- outside interface -------------------------------------- */
996
997/* outside interface: read a sector */
998static int sm_read(struct mtd_blktrans_dev *dev,
999 unsigned long sect_no, char *buf)
1000{
1001 struct sm_ftl *ftl = dev->priv;
1002 struct ftl_zone *zone;
1003 int error = 0, in_cache = 0;
1004 int zone_num, block, boffset;
1005
1006 sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
1007 mutex_lock(&ftl->mutex);
1008
1009
1010 zone = sm_get_zone(ftl, zone_num);
1011 if (IS_ERR(zone)) {
1012 error = PTR_ERR(zone);
1013 goto unlock;
1014 }
1015
1016 /* Have to look at cache first */
1017 if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
1018 in_cache = 1;
1019 if (!sm_cache_get(ftl, buf, boffset))
1020 goto unlock;
1021 }
1022
1023 /* Translate the block and return if it doesn't exist in the table */
1024 block = zone->lba_to_phys_table[block];
1025
1026 if (block == -1) {
1027 memset(buf, 0xFF, SM_SECTOR_SIZE);
1028 goto unlock;
1029 }
1030
1031 if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
1032 error = -EIO;
1033 goto unlock;
1034 }
1035
1036 if (in_cache)
1037 sm_cache_put(ftl, buf, boffset);
1038unlock:
1039 mutex_unlock(&ftl->mutex);
1040 return error;
1041}
1042
1043/* outside interface: write a sector */
1044static int sm_write(struct mtd_blktrans_dev *dev,
1045 unsigned long sec_no, char *buf)
1046{
1047 struct sm_ftl *ftl = dev->priv;
1048 struct ftl_zone *zone;
1049 int error = 0, zone_num, block, boffset;
1050
1051 BUG_ON(ftl->readonly);
1052 sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
1053
1054 /* No need for the flush timer to run now */
1055 del_timer(&ftl->timer);
1056 mutex_lock(&ftl->mutex);
1057
1058 zone = sm_get_zone(ftl, zone_num);
1059 if (IS_ERR(zone)) {
1060 error = PTR_ERR(zone);
1061 goto unlock;
1062 }
1063
1064 /* If entry is not in cache, flush it */
1065 if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
1066
1067 error = sm_cache_flush(ftl);
1068 if (error)
1069 goto unlock;
1070
1071 ftl->cache_block = block;
1072 ftl->cache_zone = zone_num;
1073 }
1074
1075 sm_cache_put(ftl, buf, boffset);
1076unlock:
1077 mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
1078 mutex_unlock(&ftl->mutex);
1079 return error;
1080}
1081
1082/* outside interface: flush everything */
1083static int sm_flush(struct mtd_blktrans_dev *dev)
1084{
1085 struct sm_ftl *ftl = dev->priv;
1086 int retval;
1087
1088 mutex_lock(&ftl->mutex);
1089 retval = sm_cache_flush(ftl);
1090 mutex_unlock(&ftl->mutex);
1091 return retval;
1092}
1093
1094/* outside interface: device is released */
1095static int sm_release(struct mtd_blktrans_dev *dev)
1096{
1097 struct sm_ftl *ftl = dev->priv;
1098
1099 mutex_lock(&ftl->mutex);
1100 del_timer_sync(&ftl->timer);
1101 cancel_work_sync(&ftl->flush_work);
1102 sm_cache_flush(ftl);
1103 mutex_unlock(&ftl->mutex);
1104 return 0;
1105}
1106
1107/* outside interface: get geometry */
1108static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1109{
1110 struct sm_ftl *ftl = dev->priv;
1111 geo->heads = ftl->heads;
1112 geo->sectors = ftl->sectors;
1113 geo->cylinders = ftl->cylinders;
1114 return 0;
1115}
1116
1117/* external interface: main initialization function */
1118static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1119{
1120 struct mtd_blktrans_dev *trans;
1121 struct sm_ftl *ftl;
1122
1123 /* Allocate & initialize our private structure */
1124 ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
1125 if (!ftl)
1126 goto error1;
1127
1128
1129 mutex_init(&ftl->mutex);
1130 setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
1131 INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1132 init_completion(&ftl->erase_completion);
1133
1134 /* Read media information */
1135 if (sm_get_media_info(ftl, mtd)) {
1136 dbg("found unsupported mtd device, aborting");
1137 goto error2;
1138 }
1139
1140
1141 /* Allocate temporary CIS buffer for read retry support */
1142 ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1143 if (!ftl->cis_buffer)
1144 goto error2;
1145
1146 /* Allocate zone array, it will be initialized on demand */
1147 ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,
1148 GFP_KERNEL);
1149 if (!ftl->zones)
1150 goto error3;
1151
1152 /* Allocate the cache*/
1153 ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1154
1155 if (!ftl->cache_data)
1156 goto error4;
1157
1158 sm_cache_init(ftl);
1159
1160
1161 /* Allocate upper layer structure and initialize it */
1162 trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
1163 if (!trans)
1164 goto error5;
1165
1166 ftl->trans = trans;
1167 trans->priv = ftl;
1168
1169 trans->tr = tr;
1170 trans->mtd = mtd;
1171 trans->devnum = -1;
1172 trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1173 trans->readonly = ftl->readonly;
1174
1175 if (sm_find_cis(ftl)) {
1176 dbg("CIS not found on mtd device, aborting");
1177 goto error6;
1178 }
1179
1180 ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1181 trans->disk_attributes = ftl->disk_attributes;
1182
1183 sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1184 (int)(mtd->size / (1024 * 1024)), mtd->index);
1185
1186 dbg("FTL layout:");
1187 dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1188 ftl->zone_count, ftl->max_lba,
1189 ftl->zone_size - ftl->max_lba);
1190 dbg("each block consists of %d bytes",
1191 ftl->block_size);
1192
1193
1194 /* Register device*/
1195 if (add_mtd_blktrans_dev(trans)) {
1196 dbg("error in mtdblktrans layer");
1197 goto error6;
1198 }
1199 return;
1200error6:
1201 kfree(trans);
1202error5:
1203 kfree(ftl->cache_data);
1204error4:
1205 kfree(ftl->zones);
1206error3:
1207 kfree(ftl->cis_buffer);
1208error2:
1209 kfree(ftl);
1210error1:
1211 return;
1212}
1213
1214/* main interface: device {surprise,} removal */
1215static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1216{
1217 struct sm_ftl *ftl = dev->priv;
1218 int i;
1219
1220 del_mtd_blktrans_dev(dev);
1221 ftl->trans = NULL;
1222
1223 for (i = 0 ; i < ftl->zone_count; i++) {
1224
1225 if (!ftl->zones[i].initialized)
1226 continue;
1227
1228 kfree(ftl->zones[i].lba_to_phys_table);
1229 kfifo_free(&ftl->zones[i].free_sectors);
1230 }
1231
1232 sm_delete_sysfs_attributes(ftl);
1233 kfree(ftl->cis_buffer);
1234 kfree(ftl->zones);
1235 kfree(ftl->cache_data);
1236 kfree(ftl);
1237}
1238
1239static struct mtd_blktrans_ops sm_ftl_ops = {
1240 .name = "smblk",
1241 .major = -1,
1242 .part_bits = SM_FTL_PARTN_BITS,
1243 .blksize = SM_SECTOR_SIZE,
1244 .getgeo = sm_getgeo,
1245
1246 .add_mtd = sm_add_mtd,
1247 .remove_dev = sm_remove_dev,
1248
1249 .readsect = sm_read,
1250 .writesect = sm_write,
1251
1252 .flush = sm_flush,
1253 .release = sm_release,
1254
1255 .owner = THIS_MODULE,
1256};
1257
1258static __init int sm_module_init(void)
1259{
1260 int error = 0;
1261 cache_flush_workqueue = create_freezeable_workqueue("smflush");
1262
1263 if (!cache_flush_workqueue)
1264 return -ENOMEM;
1265
1266 error = register_mtd_blktrans(&sm_ftl_ops);
1267 if (error)
1268 destroy_workqueue(cache_flush_workqueue);
1269 return error;
1270
1271}
1272
1273static void __exit sm_module_exit(void)
1274{
1275 destroy_workqueue(cache_flush_workqueue);
1276 deregister_mtd_blktrans(&sm_ftl_ops);
1277}
1278
1279module_init(sm_module_init);
1280module_exit(sm_module_exit);
1281
1282MODULE_LICENSE("GPL");
1283MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1284MODULE_DESCRIPTION("SmartMedia/xD mtd translation layer");