author		Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2007-07-18 06:42:10 -0400
committer	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2007-07-18 09:59:09 -0400
commit		784c145444e7dd58ae740d406155b72ac658f151 (patch)
tree		31c304913db2011a3a9f09da9dbaf2f74004d887 /drivers/mtd
parent		63b6c1ed56f69fdd35122dc591164587e3407ba0 (diff)
UBI: fix error handling in erase worker
Do not switch to read-only mode in case of -EINTR and some other obvious cases.
Switch to RO mode only when we do not know what the error is.

Reported-by: Vinit Agnihotri <vinit.agnihotri@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
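To summarize the control flow this patch introduces, the sketch below shows only the error routing: transient errors get the erase re-scheduled, -EIO sends the PEB down the mark-bad path, and only unknown errors force read-only mode. This is an illustrative sketch, not the kernel code; the helper classify_erase_error() and the enum are hypothetical names.

#include <errno.h>

/*
 * Illustrative sketch of the error routing in the new erase_worker().
 * Errors follow the kernel convention of negative errno values.
 */
enum erase_action { ERASE_RESCHEDULE, ERASE_MARK_BAD, ERASE_GO_RO };

static enum erase_action classify_erase_error(int err)
{
	switch (err) {
	case -EINTR:
	case -ENOMEM:
	case -EAGAIN:
	case -EBUSY:
		return ERASE_RESCHEDULE;	/* transient: try the erase again */
	case -EIO:
		return ERASE_MARK_BAD;		/* the PEB went bad: mark it bad */
	default:
		return ERASE_GO_RO;		/* unknown error: read-only mode */
	}
}

In the patch itself the first case calls schedule_erase(ubi, e, 0) again, the second runs the bad-block reservation path, and the third jumps to the new out_ro label.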
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/ubi/wl.c	89
1 file changed, 48 insertions(+), 41 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index d512cf16350d..9de953762097 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1060,9 +1060,8 @@ out_unlock:
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 			int cancel)
 {
-	int err;
 	struct ubi_wl_entry *e = wl_wrk->e;
-	int pnum = e->pnum;
+	int pnum = e->pnum, err, need;
 
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1097,62 +1096,70 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	kfree(wl_wrk);
 	kmem_cache_free(wl_entries_slab, e);
 
-	if (err != -EIO) {
+	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+	    err == -EBUSY) {
+		int err1;
+
+		/* Re-schedule the LEB for erasure */
+		err1 = schedule_erase(ubi, e, 0);
+		if (err1) {
+			err = err1;
+			goto out_ro;
+		}
+		return err;
+	} else if (err != -EIO) {
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
 		 * this physical eraseblock for erasure again would cause
 		 * errors again and again. Well, lets switch to RO mode.
 		 */
-		ubi_ro_mode(ubi);
-		return err;
+		goto out_ro;
 	}
 
 	/* It is %-EIO, the PEB went bad */
 
 	if (!ubi->bad_allowed) {
 		ubi_err("bad physical eraseblock %d detected", pnum);
-		ubi_ro_mode(ubi);
-		err = -EIO;
-	} else {
-		int need;
-
-		spin_lock(&ubi->volumes_lock);
-		need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
-		if (need > 0) {
-			need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
-			ubi->avail_pebs -= need;
-			ubi->rsvd_pebs += need;
-			ubi->beb_rsvd_pebs += need;
-			if (need > 0)
-				ubi_msg("reserve more %d PEBs", need);
-		}
+		goto out_ro;
+	}
 
-		if (ubi->beb_rsvd_pebs == 0) {
-			spin_unlock(&ubi->volumes_lock);
-			ubi_err("no reserved physical eraseblocks");
-			ubi_ro_mode(ubi);
-			return -EIO;
-		}
+	spin_lock(&ubi->volumes_lock);
+	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
+	if (need > 0) {
+		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
+		ubi->avail_pebs -= need;
+		ubi->rsvd_pebs += need;
+		ubi->beb_rsvd_pebs += need;
+		if (need > 0)
+			ubi_msg("reserve more %d PEBs", need);
+	}
 
+	if (ubi->beb_rsvd_pebs == 0) {
 		spin_unlock(&ubi->volumes_lock);
-		ubi_msg("mark PEB %d as bad", pnum);
+		ubi_err("no reserved physical eraseblocks");
+		goto out_ro;
+	}
 
-		err = ubi_io_mark_bad(ubi, pnum);
-		if (err) {
-			ubi_ro_mode(ubi);
-			return err;
-		}
+	spin_unlock(&ubi->volumes_lock);
+	ubi_msg("mark PEB %d as bad", pnum);
 
-		spin_lock(&ubi->volumes_lock);
-		ubi->beb_rsvd_pebs -= 1;
-		ubi->bad_peb_count += 1;
-		ubi->good_peb_count -= 1;
-		ubi_calculate_reserved(ubi);
-		if (ubi->beb_rsvd_pebs == 0)
-			ubi_warn("last PEB from the reserved pool was used");
-		spin_unlock(&ubi->volumes_lock);
-	}
+	err = ubi_io_mark_bad(ubi, pnum);
+	if (err)
+		goto out_ro;
+
+	spin_lock(&ubi->volumes_lock);
+	ubi->beb_rsvd_pebs -= 1;
+	ubi->bad_peb_count += 1;
+	ubi->good_peb_count -= 1;
+	ubi_calculate_reserved(ubi);
+	if (ubi->beb_rsvd_pebs == 0)
+		ubi_warn("last PEB from the reserved pool was used");
+	spin_unlock(&ubi->volumes_lock);
+
+	return err;
 
+out_ro:
+	ubi_ro_mode(ubi);
 	return err;
 }
 