Diffstat (limited to 'drivers')
 drivers/mtd/ubi/build.c |  9
 drivers/mtd/ubi/eba.c   | 19
 drivers/mtd/ubi/ubi.h   | 16
 drivers/mtd/ubi/wl.c    | 45
 4 files changed, 74 insertions(+), 15 deletions(-)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 1405b556c65a..d3da66682667 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -633,6 +633,15 @@ static int io_init(struct ubi_device *ubi)
 	}
 
 	/*
+	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
+	 * Erroneous PEBs are those which have read errors.
+	 */
+	ubi->max_erroneous = ubi->peb_count / 10;
+	if (ubi->max_erroneous < 16)
+		ubi->max_erroneous = 16;
+	dbg_msg("max_erroneous %d", ubi->max_erroneous);
+
+	/*
 	 * It may happen that EC and VID headers are situated in one minimal
 	 * I/O unit. In this case we can only accept this UBI image in
 	 * read-only mode.
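
As a side note on the policy this hunk introduces: the budget for erroneous PEBs is 10% of the device, floored at 16 so that small partitions still tolerate a handful of read failures. A minimal standalone sketch in userspace C; the function name and the sample counts are illustrative, not from the patch:

#include <stdio.h>

/* Hypothetical stand-in for the io_init() calculation above */
static int max_erroneous(int peb_count)
{
	int max = peb_count / 10;	/* allow up to 10% of all PEBs */

	if (max < 16)			/* but never fewer than 16 */
		max = 16;
	return max;
}

int main(void)
{
	printf("%d\n", max_erroneous(1024));	/* 102 */
	printf("%d\n", max_erroneous(64));	/* 16, the floor applies */
	return 0;
}
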
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 587b6cb5040f..632b95f3ff3f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -419,8 +419,9 @@ retry:
 				 * not implemented.
 				 */
 				if (err == UBI_IO_BAD_VID_HDR) {
-					ubi_warn("bad VID header at PEB %d, LEB"
-						 "%d:%d", pnum, vol_id, lnum);
+					ubi_warn("corrupted VID header at PEB "
+						 "%d, LEB %d:%d", pnum, vol_id,
+						 lnum);
 					err = -EBADMSG;
 				} else
 					ubi_ro_mode(ubi);
@@ -1032,6 +1033,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	if (err && err != UBI_IO_BITFLIPS) {
 		ubi_warn("error %d while reading data from PEB %d",
 			 err, from);
+		if (err == -EIO)
+			err = MOVE_SOURCE_RD_ERR;
 		goto out_unlock_buf;
 	}
 
@@ -1078,9 +1081,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	/* Read the VID header back and check if it was written correctly */
 	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
 	if (err) {
-		if (err != UBI_IO_BITFLIPS)
+		if (err != UBI_IO_BITFLIPS) {
 			ubi_warn("cannot read VID header back from PEB %d", to);
-		else
+			if (err == -EIO)
+				err = MOVE_TARGET_RD_ERR;
+		} else
 			err = MOVE_CANCEL_BITFLIPS;
 		goto out_unlock_buf;
 	}
@@ -1102,10 +1107,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 
 	err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
 	if (err) {
-		if (err != UBI_IO_BITFLIPS)
+		if (err != UBI_IO_BITFLIPS) {
 			ubi_warn("cannot read data back from PEB %d",
 				 to);
-		else
+			if (err == -EIO)
+				err = MOVE_TARGET_RD_ERR;
+		} else
 			err = MOVE_CANCEL_BITFLIPS;
 		goto out_unlock_buf;
 	}
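
All three ubi_eba_copy_leb() hunks apply the same translation: a hard -EIO from the flash is promoted to a positive MOVE_* code so the caller can tell which side of the copy failed, while bit-flips and other errno values keep their previous handling. A hedged userspace sketch of that promotion; the constant values are placeholders mirroring the enum added to ubi.h below, and read_source() is hypothetical:

#include <errno.h>
#include <stdio.h>

#define UBI_IO_BITFLIPS		5	/* placeholder value for this sketch */
#define MOVE_SOURCE_RD_ERR	2	/* mirrors the enum order in ubi.h */

/* Stand-in for the source-read path of ubi_eba_copy_leb() */
static int read_source(int err)
{
	if (err && err != UBI_IO_BITFLIPS) {
		fprintf(stderr, "error %d while reading data\n", err);
		if (err == -EIO)	/* hard read error: promote it */
			err = MOVE_SOURCE_RD_ERR;
	}
	return err;
}

int main(void)
{
	printf("%d\n", read_source(-EIO));	/* 2: MOVE_SOURCE_RD_ERR */
	printf("%d\n", read_source(-ENOMEM));	/* other errors pass through */
	return 0;
}
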
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index fd9b20da5b6b..6d929329a8d5 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -105,6 +105,10 @@ enum {
  *
  * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
  *                   PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ *                     PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ *                     PEB
  * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
  *                     PEB
  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
@@ -112,6 +116,8 @@ enum {
  */
 enum {
 	MOVE_CANCEL_RACE = 1,
+	MOVE_SOURCE_RD_ERR,
+	MOVE_TARGET_RD_ERR,
 	MOVE_TARGET_WR_ERR,
 	MOVE_CANCEL_BITFLIPS,
 };
@@ -334,14 +340,15 @@ struct ubi_wl_entry;
  * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
  * @pq: protection queue (contains physical eraseblocks which are temporarily
  *      protected from the wear-leveling worker)
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
- *           fields
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works and
+ *           @erroneous_peb_count fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: synchronizes the WL worker with user tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
@@ -361,6 +368,8 @@ struct ubi_wl_entry;
  * @peb_size: physical eraseblock size
  * @bad_peb_count: count of bad physical eraseblocks
  * @good_peb_count: count of good physical eraseblocks
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
  * @min_io_size: minimal input/output unit size of the underlying MTD device
  * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
  * @ro_mode: if the UBI device is in read-only mode
@@ -418,6 +427,7 @@ struct ubi_device {
 
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
+	struct rb_root erroneous;
 	struct rb_root free;
 	struct rb_root scrub;
 	struct list_head pq[UBI_PROT_QUEUE_LEN];
@@ -442,6 +452,8 @@ struct ubi_device {
 	int peb_size;
 	int bad_peb_count;
 	int good_peb_count;
+	int erroneous_peb_count;
+	int max_erroneous;
 	int min_io_size;
 	int hdrs_min_io_size;
 	int ro_mode;
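
The convention behind the MOVE_* enum above: negative return values stay raw errno codes and are fatal, while positive values are distinct cancel reasons the wear-leveling worker can recover from individually. A sketch of that dispatch in plain C; move_result() is hypothetical and only summarizes the reactions the wl.c hunks below implement:

#include <stdio.h>

enum {
	MOVE_CANCEL_RACE = 1,
	MOVE_SOURCE_RD_ERR,
	MOVE_TARGET_RD_ERR,
	MOVE_TARGET_WR_ERR,
	MOVE_CANCEL_BITFLIPS,
};

/* Roughly how wear_leveling_worker() reacts to ubi_eba_copy_leb()'s result */
static const char *move_result(int err)
{
	if (err < 0)
		return "fatal error";
	switch (err) {
	case 0:
		return "LEB moved successfully";
	case MOVE_CANCEL_RACE:
		return "canceled by a race, try again later";
	case MOVE_SOURCE_RD_ERR:
		return "park the source PEB in the erroneous tree";
	case MOVE_TARGET_RD_ERR:
	case MOVE_TARGET_WR_ERR:
	case MOVE_CANCEL_BITFLIPS:
		return "torture the target PEB";
	default:
		return "unknown code";
	}
}

int main(void)
{
	printf("%s\n", move_result(MOVE_SOURCE_RD_ERR));
	printf("%s\n", move_result(-5));	/* e.g. a raw -EIO */
	return 0;
}
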
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 793882ba2a6e..9d1d3595a240 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -55,8 +55,8 @@
  *
  * As it was said, for the UBI sub-system all physical eraseblocks are either
  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
- * (temporarily) in the @wl->pq queue.
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
  *
  * When the WL sub-system returns a physical eraseblock, the physical
  * eraseblock is protected from being moved for some "time". For this reason,
@@ -83,6 +83,8 @@
  * used. The former state corresponds to the @wl->free tree. The latter state
  * is split up into several sub-states:
  * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ *   erroneous - e.g., there was a read error;
  * o the WL movement is temporarily prohibited (@wl->pq queue);
  * o scrubbing is needed (@wl->scrub tree).
  *
@@ -653,7 +655,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, scrubbing = 0, torture = 0, protect = 0;
+	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
@@ -769,13 +771,31 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			goto out_not_moved;
 		}
 
-		if (err == MOVE_CANCEL_BITFLIPS ||
-		    err == MOVE_TARGET_WR_ERR) {
+		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+		    err == MOVE_TARGET_RD_ERR) {
 			/* Target PEB bit-flips or write error, torture it */
 			torture = 1;
 			goto out_not_moved;
 		}
 
+		if (err == MOVE_SOURCE_RD_ERR) {
+			/*
+			 * An error happened while reading the source PEB. Do
+			 * not switch to R/O mode in this case, and give the
+			 * upper layers a possibility to recover from this,
+			 * e.g. by unmapping the corresponding LEB. Instead,
+			 * just put this PEB on the @ubi->erroneous list to
+			 * prevent UBI from trying to move it over and over again.
+			 */
+			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+				ubi_err("too many erroneous eraseblocks (%d)",
+					ubi->erroneous_peb_count);
+				goto out_error;
+			}
+			erroneous = 1;
+			goto out_not_moved;
+		}
+
 		if (err < 0)
 			goto out_error;
 
@@ -832,7 +852,10 @@ out_not_moved:
 	spin_lock(&ubi->wl_lock);
 	if (protect)
 		prot_queue_add(ubi, e1);
-	else if (scrubbing)
+	else if (erroneous) {
+		wl_tree_add(e1, &ubi->erroneous);
+		ubi->erroneous_peb_count += 1;
+	} else if (scrubbing)
 		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
@@ -1116,6 +1139,13 @@ retry:
 	} else if (in_wl_tree(e, &ubi->scrub)) {
 		paranoid_check_in_wl_tree(e, &ubi->scrub);
 		rb_erase(&e->u.rb, &ubi->scrub);
+	} else if (in_wl_tree(e, &ubi->erroneous)) {
+		paranoid_check_in_wl_tree(e, &ubi->erroneous);
+		rb_erase(&e->u.rb, &ubi->erroneous);
+		ubi->erroneous_peb_count -= 1;
+		ubi_assert(ubi->erroneous_peb_count >= 0);
+		/* Erroneous PEBs should be tortured */
+		torture = 1;
 	} else {
 		err = prot_queue_del(ubi, e->pnum);
 		if (err) {
@@ -1364,7 +1394,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	struct ubi_scan_leb *seb, *tmp;
 	struct ubi_wl_entry *e;
 
-	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
+	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
 	mutex_init(&ubi->move_mutex);
 	init_rwsem(&ubi->work_sem);
@@ -1502,6 +1532,7 @@ void ubi_wl_close(struct ubi_device *ubi)
 	cancel_pending(ubi);
 	protection_queue_destroy(ubi);
 	tree_destroy(&ubi->used);
+	tree_destroy(&ubi->erroneous);
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
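
Taken together, the wl.c changes maintain one invariant: a PEB enters @ubi->erroneous only from the failed-move path on a source read error (capped by @max_erroneous), and leaves it on put, where the counter drops and the PEB is scheduled for torture. A compact userspace model of that bookkeeping, with a boolean array standing in for the RB-tree; every name here is illustrative:

#include <assert.h>
#include <stdbool.h>

#define MAX_PEBS 128

struct wl_state {
	bool erroneous[MAX_PEBS];	/* membership, instead of an RB-tree */
	int erroneous_peb_count;
	int max_erroneous;
};

/* Failed-move path: returns false once the error budget is exhausted */
static bool mark_erroneous(struct wl_state *wl, int pnum)
{
	if (wl->erroneous_peb_count > wl->max_erroneous)
		return false;		/* caller would report a fatal error */
	wl->erroneous[pnum] = true;
	wl->erroneous_peb_count += 1;
	return true;
}

/* Put path: leave the set, decrement the counter, torture the PEB */
static void unmark_erroneous(struct wl_state *wl, int pnum, int *torture)
{
	wl->erroneous[pnum] = false;
	wl->erroneous_peb_count -= 1;
	assert(wl->erroneous_peb_count >= 0);
	*torture = 1;
}

int main(void)
{
	struct wl_state wl = { .max_erroneous = 16 };
	int torture = 0;

	mark_erroneous(&wl, 7);
	unmark_erroneous(&wl, 7, &torture);
	assert(wl.erroneous_peb_count == 0 && torture == 1);
	return 0;
}
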