about summary refs log tree commit diff stats
path: root/drivers/mtd
diff options
context:
space:
mode:
authorArtem Bityutskiy <Artem.Bityutskiy@nokia.com>2008-12-05 06:37:02 -0500
committerArtem Bityutskiy <Artem.Bityutskiy@nokia.com>2008-12-05 06:46:50 -0500
commit6fa6f5bbc3a2ad833a3d4b798140602004f70f5a (patch)
tree96aa4b8b9f672c098f12db1d359d8e942c8a80aa /drivers/mtd
parent3c98b0a043f25fa44b289c2f35b9d6ad1d859ac9 (diff)
UBI: handle write errors in WL worker
When a PEB is being moved and a write error happens, UBI switches to R/O mode, which is wrong: at that point we have only copied the data, so we may simply select a different PEB and re-try the copy. This patch fixes the WL worker's behavior accordingly. Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/ubi/eba.c37
-rw-r--r--drivers/mtd/ubi/wl.c32
2 files changed, 44 insertions(+), 25 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 2e4d6bf94582..048a606cebde 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -949,10 +949,14 @@ write_error:
949 * This function copies logical eraseblock from physical eraseblock @from to 949 * This function copies logical eraseblock from physical eraseblock @from to
950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
951 * function. Returns: 951 * function. Returns:
952 * o %0 in case of success; 952 * o %0 in case of success;
953 * o %1 if the operation was canceled and should be tried later (e.g., 953 * o %1 if the operation was canceled because the volume is being deleted
954 * because a bit-flip was detected at the target PEB); 954 * or because the PEB was put meanwhile;
955 * o %2 if the volume is being deleted and this LEB should not be moved. 955 * o %2 if the operation was canceled because there was a write error to the
956 * target PEB;
957 * o %-EAGAIN if the operation was canceled because a bit-flip was detected
958 * in the target PEB;
959 * o a negative error code in case of failure.
956 */ 960 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 961int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 962 struct ubi_vid_hdr *vid_hdr)
@@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
978 /* 982 /*
979 * Note, we may race with volume deletion, which means that the volume 983 * Note, we may race with volume deletion, which means that the volume
980 * this logical eraseblock belongs to might be being deleted. Since the 984 * this logical eraseblock belongs to might be being deleted. Since the
981 * volume deletion unmaps all the volume's logical eraseblocks, it will 985 * volume deletion un-maps all the volume's logical eraseblocks, it will
982 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 986 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
983 */ 987 */
984 vol = ubi->volumes[idx]; 988 vol = ubi->volumes[idx];
@@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
986 /* No need to do further work, cancel */ 990 /* No need to do further work, cancel */
987 dbg_eba("volume %d is being removed, cancel", vol_id); 991 dbg_eba("volume %d is being removed, cancel", vol_id);
988 spin_unlock(&ubi->volumes_lock); 992 spin_unlock(&ubi->volumes_lock);
989 return 2; 993 return 1;
990 } 994 }
991 spin_unlock(&ubi->volumes_lock); 995 spin_unlock(&ubi->volumes_lock);
992 996
@@ -1023,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1023 1027
1024 /* 1028 /*
1025 * OK, now the LEB is locked and we can safely start moving it. Since 1029 * OK, now the LEB is locked and we can safely start moving it. Since
1026 * this function utilizes thie @ubi->peb1_buf buffer which is shared 1030 * this function utilizes the @ubi->peb1_buf buffer which is shared
1027 * with some other functions, so lock the buffer by taking the 1031 * with some other functions, so lock the buffer by taking the
1028 * @ubi->buf_mutex. 1032 * @ubi->buf_mutex.
1029 */ 1033 */
@@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1068 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 1072 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1069 1073
1070 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1074 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1071 if (err) 1075 if (err) {
1076 if (err == -EIO)
1077 err = 2;
1072 goto out_unlock_buf; 1078 goto out_unlock_buf;
1079 }
1073 1080
1074 cond_resched(); 1081 cond_resched();
1075 1082
@@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1079 if (err != UBI_IO_BITFLIPS) 1086 if (err != UBI_IO_BITFLIPS)
1080 ubi_warn("cannot read VID header back from PEB %d", to); 1087 ubi_warn("cannot read VID header back from PEB %d", to);
1081 else 1088 else
1082 err = 1; 1089 err = -EAGAIN;
1083 goto out_unlock_buf; 1090 goto out_unlock_buf;
1084 } 1091 }
1085 1092
1086 if (data_size > 0) { 1093 if (data_size > 0) {
1087 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1094 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1088 if (err) 1095 if (err) {
1096 if (err == -EIO)
1097 err = 2;
1089 goto out_unlock_buf; 1098 goto out_unlock_buf;
1099 }
1090 1100
1091 cond_resched(); 1101 cond_resched();
1092 1102
@@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1101 ubi_warn("cannot read data back from PEB %d", 1111 ubi_warn("cannot read data back from PEB %d",
1102 to); 1112 to);
1103 else 1113 else
1104 err = 1; 1114 err = -EAGAIN;
1105 goto out_unlock_buf; 1115 goto out_unlock_buf;
1106 } 1116 }
1107 1117
1108 cond_resched(); 1118 cond_resched();
1109 1119
1110 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1120 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1111 ubi_warn("read data back from PEB %d - it is different", 1121 ubi_warn("read data back from PEB %d and it is "
1112 to); 1122 "different", to);
1123 err = -EINVAL;
1113 goto out_unlock_buf; 1124 goto out_unlock_buf;
1114 } 1125 }
1115 } 1126 }
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 442099d76ec9..abf65ea414e7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -738,7 +738,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
738static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 738static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
739 int cancel) 739 int cancel)
740{ 740{
741 int err, scrubbing = 0; 741 int err, scrubbing = 0, torture = 0;
742 struct ubi_wl_prot_entry *uninitialized_var(pe); 742 struct ubi_wl_prot_entry *uninitialized_var(pe);
743 struct ubi_wl_entry *e1, *e2; 743 struct ubi_wl_entry *e1, *e2;
744 struct ubi_vid_hdr *vid_hdr; 744 struct ubi_vid_hdr *vid_hdr;
@@ -842,20 +842,26 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
842 842
843 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 843 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
844 if (err) { 844 if (err) {
845 845 if (err == -EAGAIN)
846 goto out_not_moved;
846 if (err < 0) 847 if (err < 0)
847 goto out_error; 848 goto out_error;
848 if (err == 1) 849 if (err == 2) {
850 /* Target PEB write error, torture it */
851 torture = 1;
849 goto out_not_moved; 852 goto out_not_moved;
853 }
850 854
851 /* 855 /*
852 * For some reason the LEB was not moved - it might be because 856 * The LEB has not been moved because the volume is being
853 * the volume is being deleted. We should prevent this PEB from 857 * deleted or the PEB has been put meanwhile. We should prevent
854 * being selected for wear-levelling movement for some "time", 858 * this PEB from being selected for wear-leveling movement
855 * so put it to the protection tree. 859 * again, so put it to the protection tree.
856 */ 860 */
857 861
858 dbg_wl("cancelled moving PEB %d", e1->pnum); 862 dbg_wl("canceled moving PEB %d", e1->pnum);
863 ubi_assert(err == 1);
864
859 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); 865 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
860 if (!pe) { 866 if (!pe) {
861 err = -ENOMEM; 867 err = -ENOMEM;
@@ -920,9 +926,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
920 /* 926 /*
921 * For some reasons the LEB was not moved, might be an error, might be 927 * For some reasons the LEB was not moved, might be an error, might be
922 * something else. @e1 was not changed, so return it back. @e2 might 928 * something else. @e1 was not changed, so return it back. @e2 might
923 * be changed, schedule it for erasure. 929 * have been changed, schedule it for erasure.
924 */ 930 */
925out_not_moved: 931out_not_moved:
932 dbg_wl("canceled moving PEB %d", e1->pnum);
926 ubi_free_vid_hdr(ubi, vid_hdr); 933 ubi_free_vid_hdr(ubi, vid_hdr);
927 vid_hdr = NULL; 934 vid_hdr = NULL;
928 spin_lock(&ubi->wl_lock); 935 spin_lock(&ubi->wl_lock);
@@ -930,12 +937,13 @@ out_not_moved:
930 wl_tree_add(e1, &ubi->scrub); 937 wl_tree_add(e1, &ubi->scrub);
931 else 938 else
932 wl_tree_add(e1, &ubi->used); 939 wl_tree_add(e1, &ubi->used);
940 ubi_assert(!ubi->move_to_put);
933 ubi->move_from = ubi->move_to = NULL; 941 ubi->move_from = ubi->move_to = NULL;
934 ubi->move_to_put = ubi->wl_scheduled = 0; 942 ubi->wl_scheduled = 0;
935 spin_unlock(&ubi->wl_lock); 943 spin_unlock(&ubi->wl_lock);
936 944
937 e1 = NULL; 945 e1 = NULL;
938 err = schedule_erase(ubi, e2, 0); 946 err = schedule_erase(ubi, e2, torture);
939 if (err) 947 if (err)
940 goto out_error; 948 goto out_error;
941 949
@@ -1324,7 +1332,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1324 up_write(&ubi->work_sem); 1332 up_write(&ubi->work_sem);
1325 1333
1326 /* 1334 /*
1327 * And in case last was the WL worker and it cancelled the LEB 1335 * And in case last was the WL worker and it canceled the LEB
1328 * movement, flush again. 1336 * movement, flush again.
1329 */ 1337 */
1330 while (ubi->works_count) { 1338 while (ubi->works_count) {