author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-06-08 12:28:18 -0400
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-06-10 09:13:27 -0400
commit    815bc5f8fe516f55291aef90f2142073821e7a9c (patch)
tree      6e56f146f9653c3772738488b2ec137d7c8e4cae /drivers/mtd/ubi
parent    21d08bbcb19d9cdef8ab5b584f25b50d842068e9 (diff)
UBI: fix multiple spelling typos
Some of the typos were indicated by Adrian Hunter, some by 'aspell'.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--  drivers/mtd/ubi/eba.c |  2
-rw-r--r--  drivers/mtd/ubi/io.c  |  2
-rw-r--r--  drivers/mtd/ubi/ubi.h |  4
-rw-r--r--  drivers/mtd/ubi/wl.c  | 14
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 632b95f3ff3f..b6565561218e 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -951,7 +951,7 @@ write_error:
  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
  * function. Returns:
  * o %0 in case of success;
- * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, or %MOVE_CANCEL_BITFLIPS;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc;
  * o a negative error code in case of failure.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index ac6604aeb728..effaff28bab1 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -899,7 +899,7 @@ bad:
  * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
  *   and corrected by the flash driver; this is harmless but may indicate that
  *   this eraseblock may become bad soon;
- * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC
+ * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC
  *   error detected);
  * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID
  *   header there);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 82da62bde413..70ce48b95b64 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -333,8 +333,8 @@ struct ubi_wl_entry;
  *          protected from the wear-leveling worker)
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works and
- *           @erroneous_peb_count fields
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
+ *           @erroneous, and @erroneous_peb_count fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: synchronizes the WL worker with use tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index acb5520f7f3d..2b2472300610 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -83,7 +83,7 @@
  * used. The former state corresponds to the @wl->free tree. The latter state
  * is split up on several sub-states:
  * o the WL movement is allowed (@wl->used tree);
- * o the WL movement is disallowed (@wl->erroneous) becouse the PEB is
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
  *   erroneous - e.g., there was a read error;
  * o the WL movement is temporarily prohibited (@wl->pq queue);
  * o scrubbing is needed (@wl->scrub tree).
@@ -744,8 +744,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
  * given, so we have a situation when it has not yet
  * had a chance to write it, because it was preempted.
  * So add this PEB to the protection queue so far,
- * because presubably more data will be written there
- * (including the missin VID header), and then we'll
+ * because presumably more data will be written there
+ * (including the missing VID header), and then we'll
  * move it.
  */
 dbg_wl("PEB %d has no VID header", e1->pnum);
@@ -790,8 +790,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
  * not switch to R/O mode in this case, and give the
  * upper layers a possibility to recover from this,
  * e.g. by unmapping corresponding LEB. Instead, just
- * put thie PEB to the @ubi->erroneus list to prevent
- * UBI from trying to move the over and over again.
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
  */
 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
 	ubi_err("too many erroneous eraseblocks (%d)",
@@ -1045,7 +1045,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 /*
  * If this is not %-EIO, we have no idea what to do. Scheduling
  * this physical eraseblock for erasure again would cause
- * errors again and again. Well, lets switch to RO mode.
+ * errors again and again. Well, lets switch to R/O mode.
  */
 goto out_ro;
 }
@@ -1161,7 +1161,7 @@ retry:
 rb_erase(&e->u.rb, &ubi->erroneous);
 ubi->erroneous_peb_count -= 1;
 ubi_assert(ubi->erroneous_peb_count >= 0);
-/* Erronious PEBs should be tortured */
+/* Erroneous PEBs should be tortured */
 torture = 1;
 } else {
 	err = prot_queue_del(ubi, e->pnum);