Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--  drivers/mtd/ubi/wl.c  14
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index acb5520f7f3d..2b2472300610 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -83,7 +83,7 @@
  * used. The former state corresponds to the @wl->free tree. The latter state
  * is split up on several sub-states:
  * o the WL movement is allowed (@wl->used tree);
- * o the WL movement is disallowed (@wl->erroneous) becouse the PEB is
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
  * erroneous - e.g., there was a read error;
  * o the WL movement is temporarily prohibited (@wl->pq queue);
  * o scrubbing is needed (@wl->scrub tree).
@@ -744,8 +744,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * given, so we have a situation when it has not yet
 		 * had a chance to write it, because it was preempted.
 		 * So add this PEB to the protection queue so far,
-		 * because presubably more data will be written there
-		 * (including the missin VID header), and then we'll
+		 * because presumably more data will be written there
+		 * (including the missing VID header), and then we'll
 		 * move it.
 		 */
 		dbg_wl("PEB %d has no VID header", e1->pnum);
@@ -790,8 +790,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * not switch to R/O mode in this case, and give the
 		 * upper layers a possibility to recover from this,
 		 * e.g. by unmapping corresponding LEB. Instead, just
-		 * put thie PEB to the @ubi->erroneus list to prevent
-		 * UBI from trying to move the over and over again.
+		 * put this PEB to the @ubi->erroneous list to prevent
+		 * UBI from trying to move it over and over again.
 		 */
 		if (ubi->erroneous_peb_count > ubi->max_erroneous) {
 			ubi_err("too many erroneous eraseblocks (%d)",
@@ -1045,7 +1045,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 		/*
 		 * If this is not %-EIO, we have no idea what to do. Scheduling
 		 * this physical eraseblock for erasure again would cause
-		 * errors again and again. Well, lets switch to RO mode.
+		 * errors again and again. Well, lets switch to R/O mode.
 		 */
 		goto out_ro;
 	}
@@ -1161,7 +1161,7 @@ retry:
 		rb_erase(&e->u.rb, &ubi->erroneous);
 		ubi->erroneous_peb_count -= 1;
 		ubi_assert(ubi->erroneous_peb_count >= 0);
-		/* Erronious PEBs should be tortured */
+		/* Erroneous PEBs should be tortured */
 		torture = 1;
 	} else {
 		err = prot_queue_del(ubi, e->pnum);
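
The comments this patch corrects describe UBI's wear-leveling bookkeeping: a PEB is either free (@wl->free) or used, and the used state splits into movable (@wl->used), erroneous (@wl->erroneous), temporarily protected (@wl->pq), and scrub-pending (@wl->scrub). Below is a minimal standalone sketch, not the kernel implementation, of the erroneous-list policy visible in the hunks: park a PEB that keeps failing instead of retrying the move, and fall back to R/O mode once erroneous_peb_count exceeds max_erroneous. The field names come from the diff; the enum, struct, and helper names are hypothetical.

/*
 * Sketch of the erroneous-PEB policy described in the comments above.
 * Builds with any C99 compiler; illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

enum peb_state {
	PEB_FREE,      /* @wl->free: ready for allocation */
	PEB_USED,      /* @wl->used: WL movement allowed */
	PEB_ERRONEOUS, /* @wl->erroneous: WL movement disallowed */
	PEB_PROTECTED, /* @wl->pq: movement temporarily prohibited */
	PEB_SCRUB,     /* @wl->scrub: scrubbing needed */
};

struct wl_sketch {
	int erroneous_peb_count; /* field name as in the diff */
	int max_erroneous;       /* field name as in the diff */
	bool ro_mode;            /* R/O mode flag (hypothetical) */
};

/* Called when moving data off a PEB failed with a read error. */
static void mark_erroneous(struct wl_sketch *wl, int pnum,
			   enum peb_state *state)
{
	if (wl->erroneous_peb_count > wl->max_erroneous) {
		/* Too many bad PEBs: give up and switch to R/O mode. */
		fprintf(stderr, "too many erroneous eraseblocks (%d)\n",
			wl->erroneous_peb_count);
		wl->ro_mode = true;
		return;
	}
	/* Park the PEB so we do not retry the move over and over again. */
	*state = PEB_ERRONEOUS;
	wl->erroneous_peb_count += 1;
	printf("PEB %d moved to erroneous list (count=%d)\n",
	       pnum, wl->erroneous_peb_count);
}

int main(void)
{
	struct wl_sketch wl = { .erroneous_peb_count = 0,
				.max_erroneous = 2, .ro_mode = false };
	enum peb_state s = PEB_USED;

	for (int pnum = 0; pnum < 4 && !wl.ro_mode; pnum++)
		mark_erroneous(&wl, pnum, &s);

	return wl.ro_mode ? 1 : 0;
}

The last hunk shows the recovery direction of the same policy: when an erroneous PEB is later reclaimed, it is removed from the @ubi->erroneous tree, the count is decremented, and torture = 1 requests extra checking of the eraseblock before it is trusted again.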