aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mtd
diff options
context:
space:
mode:
authorShmulik Ladkani <shmulik.ladkani@gmail.com>2012-07-04 04:06:01 -0400
committerArtem Bityutskiy <artem.bityutskiy@linux.intel.com>2012-09-04 02:38:58 -0400
commit37f758a036da56c7cff81b68d1d872752079eb6c (patch)
tree490edbbde943a709f79b68d217f39e74ba238e13 /drivers/mtd
parent8beeb3bb9df8caba36ad3e4f226255dff9c92556 (diff)
UBI: limit amount of reserved eraseblocks for bad PEB handling
The existing mechanism of reserving PEBs for bad PEB handling has two flaws:
- It is calculated as a percentage of good PEBs instead of total PEBs.
- There's no limit on the amount of PEBs UBI reserves for future bad eraseblock handling.

This patch changes the mechanism to overcome these flaws.

The desired level of PEBs reserved for bad PEB handling (beb_rsvd_level) is set to the maximum expected bad eraseblocks (bad_peb_limit) minus the existing number of bad eraseblocks (bad_peb_count).

The actual amount of PEBs reserved for bad PEB handling is usually set to the desired level (but in some circumstances may be lower than the desired level, e.g. when attaching to a device that has too few available PEBs to satisfy the desired level).

In the case where the device has too many bad PEBs (above the expected limit), then the desired level, and the actual amount of PEBs reserved are set to zero. No PEBs will be set aside for future bad eraseblock handling - even if some PEBs are made available (e.g. by shrinking a volume). If another PEB goes bad, and there are available PEBs, then the eraseblock will be marked bad (consuming one available PEB). But if there are no available PEBs, ubi will go into readonly mode.

Signed-off-by: Shmulik Ladkani <shmulik.ladkani@gmail.com>
Diffstat (limited to 'drivers/mtd')
-rw-r--r--drivers/mtd/ubi/misc.c16
-rw-r--r--drivers/mtd/ubi/wl.c46
2 files changed, 41 insertions, 21 deletions
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 8bbfb444b89..d089df05548 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -121,10 +121,18 @@ void ubi_update_reserved(struct ubi_device *ubi)
121 */ 121 */
122void ubi_calculate_reserved(struct ubi_device *ubi) 122void ubi_calculate_reserved(struct ubi_device *ubi)
123{ 123{
124 ubi->beb_rsvd_level = ubi->good_peb_count/100; 124 /*
125 ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE; 125 * Calculate the actual number of PEBs currently needed to be reserved
126 if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) 126 * for future bad eraseblock handling.
127 ubi->beb_rsvd_level = MIN_RESEVED_PEBS; 127 */
128 ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
129 if (ubi->beb_rsvd_level < 0) {
130 ubi->beb_rsvd_level = 0;
131 ubi_warn("number of bad PEBs (%d) is above the expected limit "
132 "(%d), not reserving any PEBs for bad PEB handling, "
133 "will use available PEBs (if any)",
134 ubi->bad_peb_count, ubi->bad_peb_limit);
135 }
128} 136}
129 137
130/** 138/**
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b6be644e7b8..bd05276252f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -978,9 +978,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
978 int cancel) 978 int cancel)
979{ 979{
980 struct ubi_wl_entry *e = wl_wrk->e; 980 struct ubi_wl_entry *e = wl_wrk->e;
981 int pnum = e->pnum, err, need; 981 int pnum = e->pnum;
982 int vol_id = wl_wrk->vol_id; 982 int vol_id = wl_wrk->vol_id;
983 int lnum = wl_wrk->lnum; 983 int lnum = wl_wrk->lnum;
984 int err, available_consumed = 0;
984 985
985 if (cancel) { 986 if (cancel) {
986 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 987 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1045,20 +1046,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1045 } 1046 }
1046 1047
1047 spin_lock(&ubi->volumes_lock); 1048 spin_lock(&ubi->volumes_lock);
1048 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1049 if (need > 0) {
1050 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1051 ubi->avail_pebs -= need;
1052 ubi->rsvd_pebs += need;
1053 ubi->beb_rsvd_pebs += need;
1054 if (need > 0)
1055 ubi_msg("reserve more %d PEBs", need);
1056 }
1057
1058 if (ubi->beb_rsvd_pebs == 0) { 1049 if (ubi->beb_rsvd_pebs == 0) {
1059 spin_unlock(&ubi->volumes_lock); 1050 if (ubi->avail_pebs == 0) {
1060 ubi_err("no reserved physical eraseblocks"); 1051 spin_unlock(&ubi->volumes_lock);
1061 goto out_ro; 1052 ubi_err("no reserved/available physical eraseblocks");
1053 goto out_ro;
1054 }
1055 ubi->avail_pebs -= 1;
1056 available_consumed = 1;
1062 } 1057 }
1063 spin_unlock(&ubi->volumes_lock); 1058 spin_unlock(&ubi->volumes_lock);
1064 1059
@@ -1068,19 +1063,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1068 goto out_ro; 1063 goto out_ro;
1069 1064
1070 spin_lock(&ubi->volumes_lock); 1065 spin_lock(&ubi->volumes_lock);
1071 ubi->beb_rsvd_pebs -= 1; 1066 if (ubi->beb_rsvd_pebs > 0) {
1067 if (available_consumed) {
1068 /*
1069 * The amount of reserved PEBs increased since we last
1070 * checked.
1071 */
1072 ubi->avail_pebs += 1;
1073 available_consumed = 0;
1074 }
1075 ubi->beb_rsvd_pebs -= 1;
1076 }
1072 ubi->bad_peb_count += 1; 1077 ubi->bad_peb_count += 1;
1073 ubi->good_peb_count -= 1; 1078 ubi->good_peb_count -= 1;
1074 ubi_calculate_reserved(ubi); 1079 ubi_calculate_reserved(ubi);
1075 if (ubi->beb_rsvd_pebs) 1080 if (available_consumed)
1081 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1082 else if (ubi->beb_rsvd_pebs)
1076 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); 1083 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1077 else 1084 else
1078 ubi_warn("last PEB from the reserved pool was used"); 1085 ubi_warn("last PEB from the reserve was used");
1079 spin_unlock(&ubi->volumes_lock); 1086 spin_unlock(&ubi->volumes_lock);
1080 1087
1081 return err; 1088 return err;
1082 1089
1083out_ro: 1090out_ro:
1091 if (available_consumed) {
1092 spin_lock(&ubi->volumes_lock);
1093 ubi->avail_pebs += 1;
1094 spin_unlock(&ubi->volumes_lock);
1095 }
1084 ubi_ro_mode(ubi); 1096 ubi_ro_mode(ubi);
1085 return err; 1097 return err;
1086} 1098}