author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2008-12-05 06:37:02 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2008-12-05 06:46:50 -0500
commit     6fa6f5bbc3a2ad833a3d4b798140602004f70f5a (patch)
tree       96aa4b8b9f672c098f12db1d359d8e942c8a80aa /drivers/mtd/ubi/wl.c
parent     3c98b0a043f25fa44b289c2f35b9d6ad1d859ac9 (diff)
UBI: handle write errors in WL worker
When a PEB is moved and a write error happens, UBI switches to R/O mode.
This is wrong: at that point we have only copied the data, so we can pick
a different target PEB and re-try the operation. This patch fixes the WL
worker's behavior accordingly.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
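For orientation, below is a minimal, self-contained C sketch of the decision the
wear-leveling worker makes on the return value of ubi_eba_copy_leb() after this
patch, as reconstructed from the hunks further down. The enum, helper function,
and sample values are illustrative only and are not kernel identifiers; the diff
itself remains the authoritative logic.

/*
 * Illustrative model only -- none of the names below are kernel identifiers.
 * 'err' mirrors the return value of ubi_eba_copy_leb() as handled by the
 * patched wear_leveling_worker().
 */
#include <errno.h>
#include <stdio.h>

enum wl_action {
	WL_MOVE_DONE,       /* copy succeeded */
	WL_RETRY,           /* cancel this attempt, re-try the move later */
	WL_RETRY_TORTURE,   /* re-try, and torture-test the failed target PEB */
	WL_PROTECT_SOURCE,  /* volume deleted or LEB put meanwhile: protect the source PEB */
	WL_FATAL            /* genuine error: propagate via the error path */
};

static enum wl_action classify_copy_result(int err)
{
	if (!err)
		return WL_MOVE_DONE;
	if (err == -EAGAIN)        /* re-try later, no torture needed */
		return WL_RETRY;
	if (err < 0)               /* real failure */
		return WL_FATAL;
	if (err == 2)              /* write error on the target PEB */
		return WL_RETRY_TORTURE;
	return WL_PROTECT_SOURCE;  /* err == 1 */
}

int main(void)
{
	const int samples[] = { 0, -EAGAIN, -EIO, 2, 1 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("err = %d -> action %d\n", samples[i],
		       (int)classify_copy_result(samples[i]));
	return 0;
}

In the real worker, WL_RETRY_TORTURE corresponds to jumping to out_not_moved with
torture = 1, so schedule_erase() later torture-tests the failed target PEB instead
of the whole device being switched to read-only mode.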
Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--   drivers/mtd/ubi/wl.c   32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 442099d76ec..abf65ea414e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -738,7 +738,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, scrubbing = 0;
+	int err, scrubbing = 0, torture = 0;
 	struct ubi_wl_prot_entry *uninitialized_var(pe);
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
@@ -842,20 +842,26 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
-
+		if (err == -EAGAIN)
+			goto out_not_moved;
 		if (err < 0)
 			goto out_error;
-		if (err == 1)
+		if (err == 2) {
+			/* Target PEB write error, torture it */
+			torture = 1;
 			goto out_not_moved;
+		}
 
 		/*
-		 * For some reason the LEB was not moved - it might be because
-		 * the volume is being deleted. We should prevent this PEB from
-		 * being selected for wear-levelling movement for some "time",
-		 * so put it to the protection tree.
+		 * The LEB has not been moved because the volume is being
+		 * deleted or the PEB has been put meanwhile. We should prevent
+		 * this PEB from being selected for wear-leveling movement
+		 * again, so put it to the protection tree.
 		 */
 
-		dbg_wl("cancelled moving PEB %d", e1->pnum);
+		dbg_wl("canceled moving PEB %d", e1->pnum);
+		ubi_assert(err == 1);
+
 		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
 		if (!pe) {
 			err = -ENOMEM;
@@ -920,9 +926,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	/*
 	 * For some reasons the LEB was not moved, might be an error, might be
 	 * something else. @e1 was not changed, so return it back. @e2 might
-	 * be changed, schedule it for erasure.
+	 * have been changed, schedule it for erasure.
 	 */
 out_not_moved:
+	dbg_wl("canceled moving PEB %d", e1->pnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	vid_hdr = NULL;
 	spin_lock(&ubi->wl_lock);
@@ -930,12 +937,13 @@ out_not_moved:
 		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
+	ubi_assert(!ubi->move_to_put);
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_to_put = ubi->wl_scheduled = 0;
+	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
 	e1 = NULL;
-	err = schedule_erase(ubi, e2, 0);
+	err = schedule_erase(ubi, e2, torture);
 	if (err)
 		goto out_error;
 
@@ -1324,7 +1332,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
 	up_write(&ubi->work_sem);
 
 	/*
-	 * And in case last was the WL worker and it cancelled the LEB
+	 * And in case last was the WL worker and it canceled the LEB
 	 * movement, flush again.
 	 */
 	while (ubi->works_count) {