author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-05-24 07:13:34 -0400
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-06-02 06:53:35 -0400
commit    b86a2c56e512f46d140a4bcb4e35e8a7d4a99a4b (patch)
tree      59c3e036dfd767b73e700bd7fd8cb4bee15c4f58 /drivers/mtd/ubi/wl.c
parent    87960c0b12d0c5a0b37e0c79aef77aa1a0b10d44 (diff)
UBI: do not switch to R/O mode on read errors
This patch improves UBI error handling. At the moment UBI switches to R/O mode when the WL worker fails to read the source PEB. This means that the upper layers (e.g., UBIFS) have no chance to unmap the erroneous PEB and fix the error. This patch changes this behaviour and makes UBI put such PEBs into a separate RB-tree, thus preventing the WL worker from hitting the same read errors again and again. However, there is a limit of 10% on the maximum number of such PEBs; if there are too many of them, UBI switches to R/O mode.

Additionally, this patch teaches UBI not to panic and switch to R/O mode if, after a PEB has been copied, the target LEB cannot be read back. Instead, UBI now cancels the operation and schedules the target PEB for torturing.

The error paths have been tested by injecting errors into 'ubi_eba_copy_leb()'.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
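For reference, the 10% cap described above is enforced in the hunks below by comparing ubi->erroneous_peb_count against ubi->max_erroneous. The following is only a sketch of how such a limit could be initialized at attach time; the helper name and the use of ubi->peb_count are assumptions, since the initialization site is outside this diff:

/*
 * Sketch only (assumed helper, not part of this diff): cap the
 * @ubi->erroneous tree at roughly 10% of all PEBs on the device.
 * ubi->peb_count is assumed to hold the total number of PEBs.
 */
static void init_erroneous_limit(struct ubi_device *ubi)
{
        /* At most one tenth of the PEBs may be marked erroneous */
        ubi->max_erroneous = ubi->peb_count / 10;
}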
Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--  drivers/mtd/ubi/wl.c  45
1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 793882ba2a6e..9d1d3595a240 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -55,8 +55,8 @@
  *
  * As it was said, for the UBI sub-system all physical eraseblocks are either
  * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
- * (temporarily) in the @wl->pq queue.
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
  *
  * When the WL sub-system returns a physical eraseblock, the physical
  * eraseblock is protected from being moved for some "time". For this reason,
@@ -83,6 +83,8 @@
  * used. The former state corresponds to the @wl->free tree. The latter state
  * is split up on several sub-states:
  * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ *   erroneous - e.g., there was a read error;
  * o the WL movement is temporarily prohibited (@wl->pq queue);
  * o scrubbing is needed (@wl->scrub tree).
  *
@@ -653,7 +655,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                 int cancel)
 {
-        int err, scrubbing = 0, torture = 0, protect = 0;
+        int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
         struct ubi_wl_entry *e1, *e2;
         struct ubi_vid_hdr *vid_hdr;
 
@@ -769,13 +771,31 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                 goto out_not_moved;
         }
 
-        if (err == MOVE_CANCEL_BITFLIPS ||
-            err == MOVE_TARGET_WR_ERR) {
+        if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+            err == MOVE_TARGET_RD_ERR) {
                 /* Target PEB bit-flips or write error, torture it */
                 torture = 1;
                 goto out_not_moved;
         }
 
+        if (err == MOVE_SOURCE_RD_ERR) {
+                /*
+                 * An error happened while reading the source PEB. Do
+                 * not switch to R/O mode in this case, and give the
+                 * upper layers a chance to recover from this, e.g. by
+                 * unmapping the corresponding LEB. Instead, just put
+                 * this PEB into the @ubi->erroneous tree to prevent
+                 * UBI from trying to move it over and over again.
+                 */
+                if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+                        ubi_err("too many erroneous eraseblocks (%d)",
+                                ubi->erroneous_peb_count);
+                        goto out_error;
+                }
+                erroneous = 1;
+                goto out_not_moved;
+        }
+
         if (err < 0)
                 goto out_error;
 
@@ -832,7 +852,10 @@ out_not_moved:
         spin_lock(&ubi->wl_lock);
         if (protect)
                 prot_queue_add(ubi, e1);
-        else if (scrubbing)
+        else if (erroneous) {
+                wl_tree_add(e1, &ubi->erroneous);
+                ubi->erroneous_peb_count += 1;
+        } else if (scrubbing)
                 wl_tree_add(e1, &ubi->scrub);
         else
                 wl_tree_add(e1, &ubi->used);
@@ -1116,6 +1139,13 @@ retry:
         } else if (in_wl_tree(e, &ubi->scrub)) {
                 paranoid_check_in_wl_tree(e, &ubi->scrub);
                 rb_erase(&e->u.rb, &ubi->scrub);
+        } else if (in_wl_tree(e, &ubi->erroneous)) {
+                paranoid_check_in_wl_tree(e, &ubi->erroneous);
+                rb_erase(&e->u.rb, &ubi->erroneous);
+                ubi->erroneous_peb_count -= 1;
+                ubi_assert(ubi->erroneous_peb_count >= 0);
+                /* Erroneous PEBs should be tortured */
+                torture = 1;
         } else {
                 err = prot_queue_del(ubi, e->pnum);
                 if (err) {
@@ -1364,7 +1394,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
         struct ubi_scan_leb *seb, *tmp;
         struct ubi_wl_entry *e;
 
-        ubi->used = ubi->free = ubi->scrub = RB_ROOT;
+        ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
         spin_lock_init(&ubi->wl_lock);
         mutex_init(&ubi->move_mutex);
         init_rwsem(&ubi->work_sem);
@@ -1502,6 +1532,7 @@ void ubi_wl_close(struct ubi_device *ubi)
         cancel_pending(ubi);
         protection_queue_destroy(ubi);
         tree_destroy(&ubi->used);
+        tree_destroy(&ubi->erroneous);
         tree_destroy(&ubi->free);
         tree_destroy(&ubi->scrub);
         kfree(ubi->lookuptbl);
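As a usage note, the point of keeping the device R/W after a source read error is that an upper layer can drop the bad LEB itself. Below is a minimal sketch of such a recovery path, assuming the standard kernel UBI interface from <linux/mtd/ubi.h> (ubi_leb_read() and ubi_leb_unmap()); the wrapper name and the recovery policy are hypothetical and not taken from this patch:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/mtd/ubi.h>

/*
 * Hypothetical upper-layer helper: if reading a mapped LEB fails with
 * -EIO, unmap the LEB so that UBI can erase (and, if needed, torture)
 * the underlying PEB instead of hitting the same read error on every
 * wear-levelling pass. The original read error is still reported.
 */
static int read_or_drop_leb(struct ubi_volume_desc *desc, int lnum,
                            char *buf, int offset, int len)
{
        int err;

        err = ubi_leb_read(desc, lnum, buf, offset, len, 0);
        if (err == -EIO) {
                int err2 = ubi_leb_unmap(desc, lnum);

                if (err2)
                        pr_warn("cannot unmap LEB %d, error %d\n",
                                lnum, err2);
        }
        return err;
}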