author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-08 07:40:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-08 07:40:45 -0400
commit		e9eca4de957ac33744fb994ccacd4a5102e445a8 (patch)
tree		fc1cf7c808d3daa365a56856321834d030fcf10d /drivers/mtd/ubi
parent		1929041bd8afeb3995b7c68d6f16e03422848a4c (diff)
parent		76ac66e469f084d41742ba08923de76fbdc7dce3 (diff)
Merge tag 'upstream-3.7-rc1-fastmap' of git://git.infradead.org/linux-ubi
Pull UBI fastmap changes from Artem Bityutskiy:
"This pull request contains the UBI fastmap support implemented by
Richard Weinberger from Linutronix. Fastmap is designed to address
UBI's slow scanning issues. Namely, it introduces a new on-flash
data-structure called "fastmap", which stores the information about
logical<->physical eraseblocks mappings. So now to get this
information just read the fastmap, instead of doing full scan. More
information here can be found in Richard's announcement in LKML
(Subject: UBI: Fastmap request for inclusion (v19)):
http://thread.gmane.org/gmane.linux.kernel/1364922/focus=1369109
One thing I want to say explicitly is that fastmap did not have
enough linux-next exposure. That is partially my fault - I did not
respond quickly enough. I _really_ apologize for this. But it has had
good testing and is disabled by default, so I do not expect that
we'll break anything.
Fastmap is declared as experimental so far, and it is off by default.
We did declare that the on-flash format may change. The reason for
this is that no one has used it in real production yet, so there is a
high risk that something is missing. Besides, we do not yet have
user-space tools supporting fastmap.
Nevertheless, I suggest we merge this feature. Many people want UBI's
scanning bottleneck to be fixed, and merging fastmap now should
accelerate its production use. The plan is to make it bullet-proof,
clean it up somewhat, and then make it the default for UBI. I do not
know how many kernel releases that will take.

Basically, what I want to do for fastmap is something like what Linus
did for btrfs a few years ago."
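To make the idea above concrete, here is a toy model of what such a checkpoint holds. This is an illustrative sketch only: the names and field layout are invented for the example; the real on-flash structures are the ubi_fm_* definitions this series adds to ubi-media.h (listed in the diffstat below, though that file's hunks are not part of this excerpt).

/*
 * Toy model, illustrative only. The point is the shape of the trade-off:
 * a fastmap records, per PEB, the erase counter and the LEB mapped onto
 * it, so attaching can rebuild its state from one checkpoint instead of
 * reading headers from every PEB on the device.
 */
#include <stdint.h>

struct toy_fm_entry {
	int32_t  vol_id;	/* volume owning the mapped LEB, or -1 */
	int32_t  lnum;		/* LEB mapped onto this PEB, or -1 */
	uint64_t ec;		/* erase counter of this PEB */
};

struct toy_fastmap {
	uint64_t sqnum;			/* the newest on-flash copy wins */
	uint32_t peb_count;		/* number of entries that follow */
	struct toy_fm_entry e[];	/* one entry per PEB */
};

/*
 * attach by scanning:  ~2 header reads per PEB, O(peb_count) flash I/O;
 * attach via fastmap:  read this one structure (about one PEB on typical
 *                      flash), after locating it among the first PEBs.
 */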
* tag 'upstream-3.7-rc1-fastmap' of git://git.infradead.org/linux-ubi:
UBI: Wire-up fastmap
UBI: Add fastmap core
UBI: Add fastmap support to the WL sub-system
UBI: Add fastmap stuff to attach.c
UBI: Wire-up ->fm_sem
UBI: Add fastmap bits to build.c
UBI: Add self_check_eba()
UBI: Export next_sqnum()
UBI: Add fastmap stuff to ubi.h
UBI: Add fastmap on-flash data structures
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--	drivers/mtd/ubi/Kconfig		21
-rw-r--r--	drivers/mtd/ubi/Makefile	1
-rw-r--r--	drivers/mtd/ubi/attach.c	386
-rw-r--r--	drivers/mtd/ubi/build.c		70
-rw-r--r--	drivers/mtd/ubi/eba.c		126
-rw-r--r--	drivers/mtd/ubi/fastmap.c	1537
-rw-r--r--	drivers/mtd/ubi/ubi-media.h	137
-rw-r--r--	drivers/mtd/ubi/ubi.h		118
-rw-r--r--	drivers/mtd/ubi/wl.c		599
9 files changed, 2784 insertions, 211 deletions
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 271a842f8c39..36663af56d89 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -56,6 +56,27 @@ config MTD_UBI_BEB_LIMIT
 
 	  Leave the default value if unsure.
 
+config MTD_UBI_FASTMAP
+	bool "UBI Fastmap (Experimental feature)"
+	default n
+	help
+	   Important: this feature is experimental so far and the on-flash
+	   format for fastmap may change in the next kernel versions.
+
+	   Fastmap is a mechanism which allows attaching an UBI device
+	   in nearly constant time. Instead of scanning the whole MTD device it
+	   only has to locate a checkpoint (called fastmap) on the device.
+	   The on-flash fastmap contains all information needed to attach
+	   the device. Using fastmap only makes sense on large devices where
+	   attaching by scanning takes long. UBI will not automatically install
+	   a fastmap on old images, but you can set the UBI module parameter
+	   fm_autoconvert to 1 if you want to. Please note that fastmap-enabled
+	   images are still usable with UBI implementations without
+	   fastmap support. On typical flash devices the whole fastmap fits
+	   into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+	   If in doubt, say "N".
+
 config MTD_UBI_GLUEBI
 	tristate "MTD devices emulation driver (gluebi)"
 	help
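The "nearly constant time" attach works because a fastmap anchor may live only within a fixed prefix of the device, so at most that many PEBs are examined before the checkpoint itself is read; the scan_fast() hunk in attach.c below implements this. Here is a rough, stand-alone sketch of the same search: read_vid() is a stub standing in for real flash access, and the volume-ID constant is hypothetical (the real ID is defined in ubi-media.h, not shown in this excerpt).

#include <stdint.h>

#define UBI_FM_MAX_START	64	/* anchor must sit in the first 64 PEBs */
#define TOY_FM_SB_VOLUME_ID	0x7ffffff0	/* hypothetical; see ubi-media.h */

struct vid_info {
	int vol_id;
	uint64_t sqnum;
};

/* stub for reading a VID header off flash; nonzero = empty/unreadable PEB */
static int read_vid(int pnum, struct vid_info *vi)
{
	(void)pnum; (void)vi;
	return -1;
}

/* search the fixed device prefix for the newest fastmap superblock */
static int find_fm_anchor(void)
{
	int pnum, anchor = -1;
	uint64_t max_sqnum = 0;
	struct vid_info vi;

	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		if (read_vid(pnum, &vi))
			continue;
		if (vi.vol_id == TOY_FM_SB_VOLUME_ID && vi.sqnum > max_sqnum) {
			max_sqnum = vi.sqnum;	/* newer copy found */
			anchor = pnum;
		}
	}
	return anchor;	/* -1 means no fastmap: fall back to a full scan */
}

Whatever this search finds, a full scan remains the fallback, which is why fastmap-enabled images stay usable by UBI implementations without fastmap support.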
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index a0803ac74712..b46b0c978581 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -2,5 +2,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o
 
 ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
 ubi-y += misc.o debug.o
+ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
 
 obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index f7adf53e4f45..fec406b4553d 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -300,7 +300,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
 }
 
 /**
- * compare_lebs - find out which logical eraseblock is newer.
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
  * @ubi: UBI device description object
  * @aeb: first logical eraseblock to compare
  * @pnum: physical eraseblock number of the second logical eraseblock to
@@ -319,7 +319,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
  * o bit 2 is cleared: the older LEB is not corrupted;
  * o bit 2 is set: the older LEB is corrupted.
  */
-static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
 			int pnum, const struct ubi_vid_hdr *vid_hdr)
 {
 	void *buf;
@@ -337,7 +337,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
 	 * support these images anymore. Well, those images still work,
 	 * but only if no unclean reboots happened.
 	 */
-	ubi_err("unsupported on-flash UBI format\n");
+	ubi_err("unsupported on-flash UBI format");
 	return -EINVAL;
 }
 
@@ -507,7 +507,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
 	 * sequence numbers. We still can attach these images, unless
 	 * there is a need to distinguish between old and new
 	 * eraseblocks, in which case we'll refuse the image in
-	 * 'compare_lebs()'. In other words, we attach old clean
+	 * 'ubi_compare_lebs()'. In other words, we attach old clean
 	 * images, but refuse attaching old images with duplicated
 	 * logical eraseblocks because there was an unclean reboot.
 	 */
@@ -523,7 +523,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
 		 * Now we have to drop the older one and preserve the newer
 		 * one.
 		 */
-		cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
+		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
 		if (cmp_res < 0)
 			return cmp_res;
 
@@ -748,7 +748,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
 /**
  * check_corruption - check the data area of PEB.
  * @ubi: UBI device description object
- * @vid_hrd: the (corrupted) VID header of this PEB
+ * @vid_hdr: the (corrupted) VID header of this PEB
  * @pnum: the physical eraseblock number to check
 
 * This is a helper function which is used to distinguish between VID header
@@ -810,6 +810,8 @@ out_unlock:
  * @ubi: UBI device description object
  * @ai: attaching information
  * @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
 *
 * This function reads UBI headers of PEB @pnum, checks them, and adds
 * information about this PEB to the corresponding list or RB-tree in the
@@ -817,10 +819,10 @@ out_unlock:
 * successfully handled and a negative error code in case of failure.
 */
 static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
-		    int pnum)
+		    int pnum, int *vid, unsigned long long *sqnum)
 {
 	long long uninitialized_var(ec);
-	int err, bitflips = 0, vol_id, ec_err = 0;
+	int err, bitflips = 0, vol_id = -1, ec_err = 0;
 
 	dbg_bld("scan PEB %d", pnum);
 
@@ -991,14 +993,21 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
 	}
 
 	vol_id = be32_to_cpu(vidh->vol_id);
+	if (vid)
+		*vid = vol_id;
+	if (sqnum)
+		*sqnum = be64_to_cpu(vidh->sqnum);
 	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
 		int lnum = be32_to_cpu(vidh->lnum);
 
 		/* Unsupported internal volume */
 		switch (vidh->compat) {
 		case UBI_COMPAT_DELETE:
-			ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
-				vol_id, lnum);
+			if (vol_id != UBI_FM_SB_VOLUME_ID
+			    && vol_id != UBI_FM_DATA_VOLUME_ID) {
+				ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+					vol_id, lnum);
+			}
 			err = add_to_list(ai, pnum, vol_id, lnum,
 					  ec, 1, &ai->erase);
 			if (err)
@@ -1121,51 +1130,126 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
 }
 
 /**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *aeb;
+	struct rb_node *this = av->root.rb_node;
+
+	while (this) {
+		if (this->rb_left)
+			this = this->rb_left;
+		else if (this->rb_right)
+			this = this->rb_right;
+		else {
+			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+			this = rb_parent(this);
+			if (this) {
+				if (this->rb_left == &aeb->u.rb)
+					this->rb_left = NULL;
+				else
+					this->rb_right = NULL;
+			}
+
+			kmem_cache_free(ai->aeb_slab_cache, aeb);
+		}
+	}
+	kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+	struct ubi_ainf_peb *aeb, *aeb_tmp;
+	struct ubi_ainf_volume *av;
+	struct rb_node *rb;
+
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+		list_del(&aeb->u.list);
+		kmem_cache_free(ai->aeb_slab_cache, aeb);
+	}
+
+	/* Destroy the volume RB-tree */
+	rb = ai->volumes.rb_node;
+	while (rb) {
+		if (rb->rb_left)
+			rb = rb->rb_left;
+		else if (rb->rb_right)
+			rb = rb->rb_right;
+		else {
+			av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+			rb = rb_parent(rb);
+			if (rb) {
+				if (rb->rb_left == &av->rb)
+					rb->rb_left = NULL;
+				else
+					rb->rb_right = NULL;
+			}
+
+			destroy_av(ai, av);
+		}
+	}
+
+	if (ai->aeb_slab_cache)
+		kmem_cache_destroy(ai->aeb_slab_cache);
+
+	kfree(ai);
+}
+
+/**
  * scan_all - scan entire MTD device.
  * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
  *
  * This function does full scanning of an MTD device and returns complete
  * information about it in form of a "struct ubi_attach_info" object. In case
  * of failure, an error code is returned.
  */
-static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		    int start)
 {
 	int err, pnum;
 	struct rb_node *rb1, *rb2;
 	struct ubi_ainf_volume *av;
 	struct ubi_ainf_peb *aeb;
-	struct ubi_attach_info *ai;
-
-	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
-	if (!ai)
-		return ERR_PTR(-ENOMEM);
-
-	INIT_LIST_HEAD(&ai->corr);
-	INIT_LIST_HEAD(&ai->free);
-	INIT_LIST_HEAD(&ai->erase);
-	INIT_LIST_HEAD(&ai->alien);
-	ai->volumes = RB_ROOT;
 
 	err = -ENOMEM;
-	ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
-					       sizeof(struct ubi_ainf_peb),
-					       0, 0, NULL);
-	if (!ai->aeb_slab_cache)
-		goto out_ai;
 
 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
 	if (!ech)
-		goto out_ai;
+		return err;
 
 	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
 	if (!vidh)
 		goto out_ech;
 
-	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+	for (pnum = start; pnum < ubi->peb_count; pnum++) {
 		cond_resched();
 
 		dbg_gen("process PEB %d", pnum);
-		err = scan_peb(ubi, ai, pnum);
+		err = scan_peb(ubi, ai, pnum, NULL, NULL);
 		if (err < 0)
 			goto out_vidh;
 	}
@@ -1210,32 +1294,144 @@ static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
 	ubi_free_vid_hdr(ubi, vidh);
 	kfree(ech);
 
-	return ai;
+	return 0;
 
 out_vidh:
 	ubi_free_vid_hdr(ubi, vidh);
 out_ech:
 	kfree(ech);
-out_ai:
-	ubi_destroy_ai(ai);
-	return ERR_PTR(err);
+	return err;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns 0 on success, negative return values indicate an internal
+ * error.
+ * UBI_NO_FASTMAP denotes that no fastmap was found.
+ * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+	int err, pnum, fm_anchor = -1;
+	unsigned long long max_sqnum = 0;
+
+	err = -ENOMEM;
+
+	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	if (!ech)
+		goto out;
+
+	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!vidh)
+		goto out_ech;
+
+	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+		int vol_id = -1;
+		unsigned long long sqnum = -1;
+		cond_resched();
+
+		dbg_gen("process PEB %d", pnum);
+		err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
+		if (err < 0)
+			goto out_vidh;
+
+		if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+			max_sqnum = sqnum;
+			fm_anchor = pnum;
+		}
+	}
+
+	ubi_free_vid_hdr(ubi, vidh);
+	kfree(ech);
+
+	if (fm_anchor < 0)
+		return UBI_NO_FASTMAP;
+
+	return ubi_scan_fastmap(ubi, ai, fm_anchor);
+
+out_vidh:
+	ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+	kfree(ech);
+out:
+	return err;
+}
+
+#endif
+
+static struct ubi_attach_info *alloc_ai(const char *slab_name)
+{
+	struct ubi_attach_info *ai;
+
+	ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+	if (!ai)
+		return ai;
+
+	INIT_LIST_HEAD(&ai->corr);
+	INIT_LIST_HEAD(&ai->free);
+	INIT_LIST_HEAD(&ai->erase);
+	INIT_LIST_HEAD(&ai->alien);
+	ai->volumes = RB_ROOT;
+	ai->aeb_slab_cache = kmem_cache_create(slab_name,
+					       sizeof(struct ubi_ainf_peb),
+					       0, 0, NULL);
+	if (!ai->aeb_slab_cache) {
+		kfree(ai);
+		ai = NULL;
+	}
+
+	return ai;
 }
 
 /**
  * ubi_attach - attach an MTD device.
  * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
  *
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-int ubi_attach(struct ubi_device *ubi)
+int ubi_attach(struct ubi_device *ubi, int force_scan)
 {
 	int err;
 	struct ubi_attach_info *ai;
 
-	ai = scan_all(ubi);
-	if (IS_ERR(ai))
-		return PTR_ERR(ai);
+	ai = alloc_ai("ubi_aeb_slab_cache");
+	if (!ai)
+		return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* On small flash devices we disable fastmap in any case. */
+	if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+		ubi->fm_disabled = 1;
+		force_scan = 1;
+	}
+
+	if (force_scan)
+		err = scan_all(ubi, ai, 0);
+	else {
+		err = scan_fast(ubi, ai);
+		if (err > 0) {
+			if (err != UBI_NO_FASTMAP) {
+				destroy_ai(ai);
+				ai = alloc_ai("ubi_aeb_slab_cache2");
+				if (!ai)
+					return -ENOMEM;
+			}
+
+			err = scan_all(ubi, ai, UBI_FM_MAX_START);
+		}
+	}
+#else
+	err = scan_all(ubi, ai, 0);
+#endif
+	if (err)
+		goto out_ai;
 
 	ubi->bad_peb_count = ai->bad_peb_count;
 	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
@@ -1256,7 +1452,29 @@ int ubi_attach(struct ubi_device *ubi)
 	if (err)
 		goto out_wl;
 
-	ubi_destroy_ai(ai);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	if (ubi->fm && ubi->dbg->chk_gen) {
+		struct ubi_attach_info *scan_ai;
+
+		scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
+		if (!scan_ai)
+			goto out_wl;
+
+		err = scan_all(ubi, scan_ai, 0);
+		if (err) {
+			destroy_ai(scan_ai);
+			goto out_wl;
+		}
+
+		err = self_check_eba(ubi, ai, scan_ai);
+		destroy_ai(scan_ai);
+
+		if (err)
+			goto out_wl;
+	}
+#endif
+
+	destroy_ai(ai);
 	return 0;
 
 out_wl:
@@ -1265,99 +1483,11 @@ out_vtbl:
 	ubi_free_internal_volumes(ubi);
 	vfree(ubi->vtbl);
 out_ai:
-	ubi_destroy_ai(ai);
+	destroy_ai(ai);
 	return err;
 }
 
 /**
- * destroy_av - free volume attaching information.
- * @av: volume attaching information
- * @ai: attaching information
- *
- * This function destroys the volume attaching information.
- */
-static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
-{
-	struct ubi_ainf_peb *aeb;
-	struct rb_node *this = av->root.rb_node;
-
-	while (this) {
-		if (this->rb_left)
-			this = this->rb_left;
-		else if (this->rb_right)
-			this = this->rb_right;
-		else {
-			aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
-			this = rb_parent(this);
-			if (this) {
-				if (this->rb_left == &aeb->u.rb)
-					this->rb_left = NULL;
-				else
-					this->rb_right = NULL;
-			}
-
-			kmem_cache_free(ai->aeb_slab_cache, aeb);
-		}
-	}
-	kfree(av);
-}
-
-/**
- * ubi_destroy_ai - destroy attaching information.
- * @ai: attaching information
- */
-void ubi_destroy_ai(struct ubi_attach_info *ai)
-{
-	struct ubi_ainf_peb *aeb, *aeb_tmp;
-	struct ubi_ainf_volume *av;
-	struct rb_node *rb;
-
-	list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
-		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
-	}
-	list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
-		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
-	}
-	list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
-		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
-	}
-	list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
-		list_del(&aeb->u.list);
-		kmem_cache_free(ai->aeb_slab_cache, aeb);
-	}
-
-	/* Destroy the volume RB-tree */
-	rb = ai->volumes.rb_node;
-	while (rb) {
-		if (rb->rb_left)
-			rb = rb->rb_left;
-		else if (rb->rb_right)
-			rb = rb->rb_right;
-		else {
-			av = rb_entry(rb, struct ubi_ainf_volume, rb);
-
-			rb = rb_parent(rb);
-			if (rb) {
-				if (rb->rb_left == &av->rb)
-					rb->rb_left = NULL;
-				else
-					rb->rb_right = NULL;
-			}
-
-			destroy_av(ai, av);
-		}
-	}
-
-	if (ai->aeb_slab_cache)
-		kmem_cache_destroy(ai->aeb_slab_cache);
-
-	kfree(ai);
-}
-
-/**
  * self_check_ai - check the attaching information.
  * @ubi: UBI device description object
  * @ai: attaching information
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 34977039850c..344b4cb49d4e 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -76,7 +76,10 @@ static int __initdata mtd_devs;
 
 /* MTD devices specification parameters */
 static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+#endif
 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
 struct class *ubi_class;
 
@@ -153,6 +156,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
 
 	ubi_do_get_device_info(ubi, &nt.di);
 	ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	switch (ntype) {
+	case UBI_VOLUME_ADDED:
+	case UBI_VOLUME_REMOVED:
+	case UBI_VOLUME_RESIZED:
+	case UBI_VOLUME_RENAMED:
+		if (ubi_update_fastmap(ubi)) {
+			ubi_err("Unable to update fastmap!");
+			ubi_ro_mode(ubi);
+		}
+	}
+#endif
 	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
 }
 
@@ -918,10 +934,40 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 	ubi->vid_hdr_offset = vid_hdr_offset;
 	ubi->autoresize_vol_id = -1;
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	ubi->fm_pool.used = ubi->fm_pool.size = 0;
+	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+	/*
+	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
+	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+	 */
+	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
+		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
+
+	ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
+	ubi->fm_disabled = !fm_autoconvert;
+
+	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+	    <= UBI_FM_MAX_START) {
+		ubi_err("More than %i PEBs are needed for fastmap, sorry.",
+			UBI_FM_MAX_START);
+		ubi->fm_disabled = 1;
+	}
+
+	ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
+	ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+#else
+	ubi->fm_disabled = 1;
+#endif
 	mutex_init(&ubi->buf_mutex);
 	mutex_init(&ubi->ckvol_mutex);
 	mutex_init(&ubi->device_mutex);
 	spin_lock_init(&ubi->volumes_lock);
+	mutex_init(&ubi->fm_mutex);
+	init_rwsem(&ubi->fm_sem);
 
 	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
 
@@ -934,11 +980,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 	if (!ubi->peb_buf)
 		goto out_free;
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	ubi->fm_size = ubi_calc_fm_size(ubi);
+	ubi->fm_buf = vzalloc(ubi->fm_size);
+	if (!ubi->fm_buf)
+		goto out_free;
+#endif
 	err = ubi_debugging_init_dev(ubi);
 	if (err)
 		goto out_free;
 
-	err = ubi_attach(ubi);
+	err = ubi_attach(ubi, 0);
 	if (err) {
 		ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
 		goto out_debugging;
@@ -1012,6 +1064,7 @@ out_debugging:
 	ubi_debugging_exit_dev(ubi);
 out_free:
 	vfree(ubi->peb_buf);
+	vfree(ubi->fm_buf);
 	if (ref)
 		put_device(&ubi->dev);
 	else
@@ -1061,7 +1114,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 	ubi_assert(ubi_num == ubi->ubi_num);
 	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
 	ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	/* If we don't write a new fastmap at detach time we lose all
+	 * EC updates that have been made since the last written fastmap. */
+	ubi_update_fastmap(ubi);
+#endif
 	/*
 	 * Before freeing anything, we have to stop the background thread to
 	 * prevent it from doing anything on this device while we are freeing.
@@ -1077,12 +1134,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
 
 	ubi_debugfs_exit_dev(ubi);
 	uif_close(ubi);
+
 	ubi_wl_close(ubi);
 	ubi_free_internal_volumes(ubi);
 	vfree(ubi->vtbl);
 	put_mtd_device(ubi->mtd);
 	ubi_debugging_exit_dev(ubi);
 	vfree(ubi->peb_buf);
+	vfree(ubi->fm_buf);
 	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
 	put_device(&ubi->dev);
 	return 0;
@@ -1404,7 +1463,10 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
 	"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
 	"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
 	"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
-
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+#endif
 MODULE_VERSION(__stringify(UBI_VERSION));
 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
 MODULE_AUTHOR("Artem Bityutskiy");
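The fm_pool sizing logic added to ubi_attach_mtd_dev() above is worth a worked example. Below is a minimal sketch of the same arithmetic, assuming the clamp constants this series adds to ubi.h are UBI_FM_MIN_POOL_SIZE = 8 and UBI_FM_MAX_POOL_SIZE = 256 (the ubi.h hunks are not part of this excerpt, so verify there if in doubt):

#include <stdio.h>

#define UBI_FM_MIN_POOL_SIZE	8	/* assumed, from ubi.h in this series */
#define UBI_FM_MAX_POOL_SIZE	256	/* assumed, from ubi.h in this series */

static int fm_pool_max_size(int peb_count)
{
	int sz = (peb_count / 100) * 5;	/* 5%, same integer math as above */

	if (sz > UBI_FM_MAX_POOL_SIZE)
		sz = UBI_FM_MAX_POOL_SIZE;
	if (sz < UBI_FM_MIN_POOL_SIZE)
		sz = UBI_FM_MIN_POOL_SIZE;
	return sz;
}

int main(void)
{
	int sizes[] = { 128, 4096, 8192 };

	for (int i = 0; i < 3; i++)
		printf("%d PEBs -> pool size %d\n",
		       sizes[i], fm_pool_max_size(sizes[i]));
	return 0;
}

So a 128-PEB device is raised to the minimum pool of 8, a 4096-PEB device gets the plain 5% figure (200), and devices of 5200 PEBs or more saturate at 256.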
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index a26d7d253174..0e11671dadc4 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
  * global sequence counter value. It also increases the global sequence
  * counter.
  */
-static unsigned long long next_sqnum(struct ubi_device *ubi)
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
 {
 	unsigned long long sqnum;
 
@@ -340,7 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
 
 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+	up_read(&ubi->fm_sem);
 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 
 out_unlock:
@@ -521,7 +523,7 @@ retry:
 		goto out_put;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
 	if (err)
 		goto write_error;
@@ -548,7 +550,9 @@ retry:
 	mutex_unlock(&ubi->buf_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = new_pnum;
+	up_read(&ubi->fm_sem);
 	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
 
 	ubi_msg("data was successfully recovered");
@@ -632,7 +636,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
 	}
 
 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -665,7 +669,9 @@ retry:
 		}
 	}
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -692,7 +698,7 @@ write_error:
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -745,7 +751,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -783,7 +789,9 @@ retry:
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] < 0);
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_sem);
 
 	leb_write_unlock(ubi, vol_id, lnum);
 	ubi_free_vid_hdr(ubi, vid_hdr);
@@ -810,7 +818,7 @@ write_error:
 		return err;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -862,7 +870,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 	if (err)
 		goto out_mutex;
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
 	vid_hdr->lnum = cpu_to_be32(lnum);
 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -904,7 +912,9 @@ retry:
 		goto out_leb_unlock;
 	}
 
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = pnum;
+	up_read(&ubi->fm_sem);
 
 out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
@@ -930,7 +940,7 @@ write_error:
 		goto out_leb_unlock;
 	}
 
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 	ubi_msg("try another PEB");
 	goto retry;
 }
@@ -1089,7 +1099,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 		vid_hdr->data_size = cpu_to_be32(data_size);
 		vid_hdr->data_crc = cpu_to_be32(crc);
 	}
-	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 
 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
 	if (err) {
@@ -1151,7 +1161,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	}
 
 	ubi_assert(vol->eba_tbl[lnum] == from);
+	down_read(&ubi->fm_sem);
 	vol->eba_tbl[lnum] = to;
+	up_read(&ubi->fm_sem);
 
 out_unlock_buf:
 	mutex_unlock(&ubi->buf_mutex);
@@ -1202,6 +1214,102 @@ static void print_rsvd_warning(struct ubi_device *ubi,
 }
 
 /**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+		   struct ubi_attach_info *ai_scan)
+{
+	int i, j, num_volumes, ret = 0;
+	int **scan_eba, **fm_eba;
+	struct ubi_ainf_volume *av;
+	struct ubi_volume *vol;
+	struct ubi_ainf_peb *aeb;
+	struct rb_node *rb;
+
+	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+	if (!scan_eba)
+		return -ENOMEM;
+
+	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+	if (!fm_eba) {
+		kfree(scan_eba);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_volumes; i++) {
+		vol = ubi->volumes[i];
+		if (!vol)
+			continue;
+
+		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+				      GFP_KERNEL);
+		if (!scan_eba[i]) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+				    GFP_KERNEL);
+		if (!fm_eba[i]) {
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		for (j = 0; j < vol->reserved_pebs; j++)
+			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+		if (!av)
+			continue;
+
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+			scan_eba[i][aeb->lnum] = aeb->pnum;
+
+		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+		if (!av)
+			continue;
+
+		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+			fm_eba[i][aeb->lnum] = aeb->pnum;
+
+		for (j = 0; j < vol->reserved_pebs; j++) {
+			if (scan_eba[i][j] != fm_eba[i][j]) {
+				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
+					continue;
+
+				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
+					vol->vol_id, i, fm_eba[i][j],
+					scan_eba[i][j]);
+				ubi_assert(0);
+			}
+		}
+	}
+
+out_free:
+	for (i = 0; i < num_volumes; i++) {
+		if (!ubi->volumes[i])
+			continue;
+
+		kfree(scan_eba[i]);
+		kfree(fm_eba[i]);
+	}
+
+	kfree(scan_eba);
+	kfree(fm_eba);
+	return ret;
+}
+
+/**
  * ubi_eba_init - initialize the EBA sub-system using attaching information.
  * @ubi: UBI device description object
  * @ai: attaching information
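A note on the fm_sem idiom the eba.c hunks introduce: the semaphore is used in an inverted reader/writer role. Every EBA-table store takes fm_sem as a reader, so frequent mapping updates stay concurrent; the fastmap writer is then expected to take it as a writer to freeze all tables while the checkpoint is serialized (presumably in ubi_update_fastmap(), which lies beyond the truncated part of fastmap.c below). A user-space model of the pattern, with pthreads standing in for the kernel's rw_semaphore and all names illustrative:

#include <pthread.h>

#define LEB_COUNT 128

static pthread_rwlock_t fm_sem = PTHREAD_RWLOCK_INITIALIZER;
static int eba_tbl[LEB_COUNT];

/* frequent path: re-map one LEB; many of these may run concurrently,
 * so the single-word table update takes the lock on the *read* side */
static void map_leb(int lnum, int pnum)
{
	pthread_rwlock_rdlock(&fm_sem);
	eba_tbl[lnum] = pnum;
	pthread_rwlock_unlock(&fm_sem);
}

/* rare path: checkpoint the table; taking the *write* side excludes
 * all mappers, so the snapshot written to flash is consistent */
static void write_fastmap_checkpoint(void)
{
	pthread_rwlock_wrlock(&fm_sem);
	/* serialize eba_tbl[] into the on-flash fastmap here */
	pthread_rwlock_unlock(&fm_sem);
}

The design choice is that the cheap, frequent operation pays only a shared lock, while the rare, expensive checkpoint briefly excludes them all to obtain a consistent snapshot.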
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 000000000000..1a5f53c090d4
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1537 @@
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#include <linux/crc32.h>
+#include "ubi.h"
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+	size_t size;
+
+	size = sizeof(struct ubi_fm_hdr) + \
+		sizeof(struct ubi_fm_scan_pool) + \
+		sizeof(struct ubi_fm_scan_pool) + \
+		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
+		(sizeof(struct ubi_fm_eba) + \
+		(ubi->peb_count * sizeof(__be32))) + \
+		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+	return roundup(size, ubi->leb_size);
+}
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+	struct ubi_vid_hdr *new;
+
+	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+	if (!new)
+		goto out;
+
+	new->vol_type = UBI_VID_DYNAMIC;
+	new->vol_id = cpu_to_be32(vol_id);
+
+	/* UBI implementations without fastmap support have to delete the
+	 * fastmap.
+	 */
+	new->compat = UBI_COMPAT_DELETE;
+
+out:
+	return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new LEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+		   int pnum, int ec, int scrub)
+{
+	struct ubi_ainf_peb *aeb;
+
+	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+	if (!aeb)
+		return -ENOMEM;
+
+	aeb->pnum = pnum;
+	aeb->ec = ec;
+	aeb->lnum = -1;
+	aeb->scrub = scrub;
+	aeb->copy_flag = aeb->sqnum = 0;
+
+	ai->ec_sum += aeb->ec;
+	ai->ec_count++;
+
+	if (ai->max_ec < aeb->ec)
+		ai->max_ec = aeb->ec;
+
+	if (ai->min_ec > aeb->ec)
+		ai->min_ec = aeb->ec;
+
+	list_add_tail(&aeb->u.list, list);
+
+	return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBS
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+				       int used_ebs, int data_pad, u8 vol_type,
+				       int last_eb_bytes)
+{
+	struct ubi_ainf_volume *av;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	while (*p) {
+		parent = *p;
+		av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+		if (vol_id > av->vol_id)
+			p = &(*p)->rb_left;
+		else if (vol_id < av->vol_id)
+			p = &(*p)->rb_right;
+	}
+
+	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+	if (!av)
+		goto out;
+
+	av->highest_lnum = av->leb_count = 0;
+	av->vol_id = vol_id;
+	av->used_ebs = used_ebs;
+	av->data_pad = data_pad;
+	av->last_data_size = last_eb_bytes;
+	av->compat = 0;
+	av->vol_type = vol_type;
+	av->root = RB_ROOT;
+
+	dbg_bld("found volume (ID %i)", vol_id);
+
+	rb_link_node(&av->rb, parent, p);
+	rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+	return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the to be assigned SEB
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+			     struct ubi_ainf_peb *aeb,
+			     struct ubi_ainf_volume *av)
+{
+	struct ubi_ainf_peb *tmp_aeb;
+	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+	p = &av->root.rb_node;
+	while (*p) {
+		parent = *p;
+
+		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+		if (aeb->lnum != tmp_aeb->lnum) {
+			if (aeb->lnum < tmp_aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		} else
+			break;
+	}
+
+	list_del(&aeb->u.list);
+	av->leb_count++;
+
+	rb_link_node(&aeb->u.rb, parent, p);
+	rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+		      struct ubi_ainf_peb *new_aeb)
+{
+	struct rb_node **p = &av->root.rb_node, *parent = NULL;
+	struct ubi_ainf_peb *aeb, *victim;
+	int cmp_res;
+
+	while (*p) {
+		parent = *p;
+		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+				p = &(*p)->rb_left;
+			else
+				p = &(*p)->rb_right;
+
+			continue;
+		}
+
+		/* This case can happen if the fastmap gets written
+		 * because of a volume change (creation, deletion, ..).
+		 * Then a PEB can be within the persistent EBA and the pool.
+		 */
+		if (aeb->pnum == new_aeb->pnum) {
+			ubi_assert(aeb->lnum == new_aeb->lnum);
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+			return 0;
+		}
+
+		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+		if (cmp_res < 0)
+			return cmp_res;
+
+		/* new_aeb is newer */
+		if (cmp_res & 1) {
+			victim = kmem_cache_alloc(ai->aeb_slab_cache,
+				GFP_KERNEL);
+			if (!victim)
+				return -ENOMEM;
+
+			victim->ec = aeb->ec;
+			victim->pnum = aeb->pnum;
+			list_add_tail(&victim->u.list, &ai->erase);
+
+			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+				av->last_data_size = \
+					be32_to_cpu(new_vh->data_size);
+
+			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+
+			aeb->ec = new_aeb->ec;
+			aeb->pnum = new_aeb->pnum;
+			aeb->copy_flag = new_vh->copy_flag;
+			aeb->scrub = new_aeb->scrub;
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+		/* new_aeb is older */
+		} else {
+			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+				av->vol_id, aeb->lnum, new_aeb->pnum);
+			list_add_tail(&new_aeb->u.list, &ai->erase);
+		}
+
+		return 0;
+	}
+	/* This LEB is new, let's add it to the volume */
+
+	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+		av->highest_lnum = be32_to_cpu(new_vh->lnum);
+		av->last_data_size = be32_to_cpu(new_vh->data_size);
+	}
+
+	if (av->vol_type == UBI_STATIC_VOLUME)
+		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+	av->leb_count++;
+
+	rb_link_node(&new_aeb->u.rb, parent, p);
+	rb_insert_color(&new_aeb->u.rb, &av->root);
+
+	return 0;
+}
+
290 | /** | ||
291 | * process_pool_aeb - we found a non-empty PEB in a pool. | ||
292 | * @ubi: UBI device object | ||
293 | * @ai: attach info object | ||
294 | * @new_vh: the volume header derived from new_aeb | ||
295 | * @new_aeb: the AEB to be examined | ||
296 | * | ||
297 | * Returns 0 on success, < 0 indicates an internal error. | ||
298 | */ | ||
299 | static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, | ||
300 | struct ubi_vid_hdr *new_vh, | ||
301 | struct ubi_ainf_peb *new_aeb) | ||
302 | { | ||
303 | struct ubi_ainf_volume *av, *tmp_av = NULL; | ||
304 | struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; | ||
305 | int found = 0; | ||
306 | |||
307 | if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID || | ||
308 | be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) { | ||
309 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | /* Find the volume this AEB belongs to */ | ||
315 | while (*p) { | ||
316 | parent = *p; | ||
317 | tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); | ||
318 | |||
319 | if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id) | ||
320 | p = &(*p)->rb_left; | ||
321 | else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id) | ||
322 | p = &(*p)->rb_right; | ||
323 | else { | ||
324 | found = 1; | ||
325 | break; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | if (found) { | ||
330 | av = tmp_av; | ||
331 | } else { | ||
332 | ubi_err("orphaned volume in fastmap pool!"); | ||
333 | kmem_cache_free(ai->aeb_slab_cache, new_aeb); | ||
334 | return UBI_BAD_FASTMAP; | ||
335 | } | ||
335 | |||
336 | ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id); | ||
337 | |||
338 | return update_vol(ubi, ai, av, new_vh, new_aeb); | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * unmap_peb - unmap a PEB. | ||
343 | * @ai: UBI attach info object | ||
344 | * @pnum: The PEB to be unmapped | ||
345 | * | ||
346 | * If fastmap detects a free PEB in the pool it has to check whether | ||
347 | * this PEB has been unmapped after writing the fastmap. | ||
348 | */ | ||
349 | static void unmap_peb(struct ubi_attach_info *ai, int pnum) | ||
350 | { | ||
351 | struct ubi_ainf_volume *av; | ||
352 | struct rb_node *node, *node2; | ||
353 | struct ubi_ainf_peb *aeb; | ||
354 | |||
355 | for (node = rb_first(&ai->volumes); node; node = rb_next(node)) { | ||
356 | av = rb_entry(node, struct ubi_ainf_volume, rb); | ||
357 | |||
358 | for (node2 = rb_first(&av->root); node2; | ||
359 | node2 = rb_next(node2)) { | ||
360 | aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); | ||
361 | if (aeb->pnum == pnum) { | ||
362 | rb_erase(&aeb->u.rb, &av->root); | ||
363 | kmem_cache_free(ai->aeb_slab_cache, aeb); | ||
364 | return; | ||
365 | } | ||
366 | } | ||
367 | } | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs. | ||
372 | * @ubi: UBI device object | ||
373 | * @ai: attach info object | ||
374 | * @pebs: an array of all PEB numbers in the pool to be scanned | ||
375 | * @pool_size: size of the pool (number of entries in @pebs) | ||
376 | * @max_sqnum: pointer to the maximal sequence number | ||
377 | * @eba_orphans: list of PEBs which need to be scanned | ||
378 | * @free: list of PEBs which are most likely free (and go into @ai->free) | ||
379 | * | ||
380 | * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned. | ||
381 | * < 0 indicates an internal error. | ||
382 | */ | ||
383 | static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai, | ||
384 | __be32 *pebs, int pool_size, unsigned long long *max_sqnum, | ||
385 | struct list_head *eba_orphans, struct list_head *free) | ||
386 | { | ||
387 | struct ubi_vid_hdr *vh; | ||
388 | struct ubi_ec_hdr *ech; | ||
389 | struct ubi_ainf_peb *new_aeb, *tmp_aeb; | ||
390 | int i, pnum, err, found_orphan, ret = 0; | ||
391 | |||
392 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
393 | if (!ech) | ||
394 | return -ENOMEM; | ||
395 | |||
396 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | ||
397 | if (!vh) { | ||
398 | kfree(ech); | ||
399 | return -ENOMEM; | ||
400 | } | ||
401 | |||
402 | dbg_bld("scanning fastmap pool: size = %i", pool_size); | ||
403 | |||
404 | /* | ||
405 | * Now scan all PEBs in the pool to find changes which have been made | ||
406 | * after the creation of the fastmap | ||
407 | */ | ||
408 | for (i = 0; i < pool_size; i++) { | ||
409 | int scrub = 0; | ||
410 | |||
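| /* The pool array holds raw on-flash (big-endian) PEB numbers, | ||
|  * hence the conversion before use. | ||
|  */ | ||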
411 | pnum = be32_to_cpu(pebs[i]); | ||
412 | |||
413 | if (ubi_io_is_bad(ubi, pnum)) { | ||
414 | ubi_err("bad PEB in fastmap pool!"); | ||
415 | ret = UBI_BAD_FASTMAP; | ||
416 | goto out; | ||
417 | } | ||
418 | |||
419 | err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); | ||
420 | if (err && err != UBI_IO_BITFLIPS) { | ||
421 | ubi_err("unable to read EC header! PEB:%i err:%i", | ||
422 | pnum, err); | ||
423 | ret = err > 0 ? UBI_BAD_FASTMAP : err; | ||
424 | goto out; | ||
425 | } else if (err == UBI_IO_BITFLIPS) | ||
426 | scrub = 1; | ||
427 | |||
428 | if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { | ||
429 | ubi_err("bad image seq: 0x%x, expected: 0x%x", | ||
430 | be32_to_cpu(ech->image_seq), ubi->image_seq); | ||
431 | ret = UBI_BAD_FASTMAP; | ||
432 | goto out; | ||
433 | } | ||
434 | |||
435 | err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); | ||
436 | if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) { | ||
437 | unsigned long long ec = be64_to_cpu(ech->ec); | ||
438 | unmap_peb(ai, pnum); | ||
439 | dbg_bld("Adding PEB to free: %i", pnum); | ||
440 | if (err == UBI_IO_FF_BITFLIPS) | ||
441 | add_aeb(ai, free, pnum, ec, 1); | ||
442 | else | ||
443 | add_aeb(ai, free, pnum, ec, 0); | ||
444 | continue; | ||
445 | } else if (err == 0 || err == UBI_IO_BITFLIPS) { | ||
446 | dbg_bld("Found non-empty PEB:%i in pool", pnum); | ||
447 | |||
448 | if (err == UBI_IO_BITFLIPS) | ||
449 | scrub = 1; | ||
450 | |||
451 | found_orphan = 0; | ||
452 | list_for_each_entry(tmp_aeb, eba_orphans, u.list) { | ||
453 | if (tmp_aeb->pnum == pnum) { | ||
454 | found_orphan = 1; | ||
455 | break; | ||
456 | } | ||
457 | } | ||
458 | if (found_orphan) { | ||
459 | list_del(&tmp_aeb->u.list); | ||
460 | kmem_cache_free(ai->aeb_slab_cache, tmp_aeb); | ||
461 | } | ||
462 | |||
463 | new_aeb = kmem_cache_alloc(ai->aeb_slab_cache, | ||
464 | GFP_KERNEL); | ||
465 | if (!new_aeb) { | ||
466 | ret = -ENOMEM; | ||
467 | goto out; | ||
468 | } | ||
469 | |||
470 | new_aeb->ec = be64_to_cpu(ech->ec); | ||
471 | new_aeb->pnum = pnum; | ||
472 | new_aeb->lnum = be32_to_cpu(vh->lnum); | ||
473 | new_aeb->sqnum = be64_to_cpu(vh->sqnum); | ||
474 | new_aeb->copy_flag = vh->copy_flag; | ||
475 | new_aeb->scrub = scrub; | ||
476 | |||
477 | if (*max_sqnum < new_aeb->sqnum) | ||
478 | *max_sqnum = new_aeb->sqnum; | ||
479 | |||
480 | err = process_pool_aeb(ubi, ai, vh, new_aeb); | ||
481 | if (err) { | ||
482 | ret = err > 0 ? UBI_BAD_FASTMAP : err; | ||
483 | goto out; | ||
484 | } | ||
485 | } else { | ||
486 | /* We are paranoid and fall back to scanning mode */ | ||
487 | ubi_err("fastmap pool contains damaged PEBs!"); | ||
488 | ret = err > 0 ? UBI_BAD_FASTMAP : err; | ||
489 | goto out; | ||
490 | } | ||
492 | } | ||
493 | |||
494 | out: | ||
495 | ubi_free_vid_hdr(ubi, vh); | ||
496 | kfree(ech); | ||
497 | return ret; | ||
498 | } | ||
499 | |||
500 | /** | ||
501 | * count_fastmap_pebs - Counts the PEBs found by fastmap. | ||
502 | * @ai: The UBI attach info object | ||
503 | * | ||
504 | * Returns the number of PEBs accounted for by the fastmap. | ||
505 | */ | ||
504 | static int count_fastmap_pebs(struct ubi_attach_info *ai) | ||
505 | { | ||
506 | struct ubi_ainf_peb *aeb; | ||
507 | struct ubi_ainf_volume *av; | ||
508 | struct rb_node *rb1, *rb2; | ||
509 | int n = 0; | ||
510 | |||
511 | list_for_each_entry(aeb, &ai->erase, u.list) | ||
512 | n++; | ||
513 | |||
514 | list_for_each_entry(aeb, &ai->free, u.list) | ||
515 | n++; | ||
516 | |||
517 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) | ||
518 | ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) | ||
519 | n++; | ||
520 | |||
521 | return n; | ||
522 | } | ||
523 | |||
524 | /** | ||
525 | * ubi_attach_fastmap - creates ubi_attach_info from a fastmap. | ||
526 | * @ubi: UBI device object | ||
527 | * @ai: UBI attach info object | ||
528 | * @fm: the fastmap to be attached | ||
529 | * | ||
530 | * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable. | ||
531 | * < 0 indicates an internal error. | ||
532 | */ | ||
533 | static int ubi_attach_fastmap(struct ubi_device *ubi, | ||
534 | struct ubi_attach_info *ai, | ||
535 | struct ubi_fastmap_layout *fm) | ||
536 | { | ||
537 | struct list_head used, eba_orphans, free; | ||
538 | struct ubi_ainf_volume *av; | ||
539 | struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb; | ||
540 | struct ubi_ec_hdr *ech; | ||
541 | struct ubi_fm_sb *fmsb; | ||
542 | struct ubi_fm_hdr *fmhdr; | ||
543 | struct ubi_fm_scan_pool *fmpl1, *fmpl2; | ||
544 | struct ubi_fm_ec *fmec; | ||
545 | struct ubi_fm_volhdr *fmvhdr; | ||
546 | struct ubi_fm_eba *fm_eba; | ||
547 | int ret, i, j, pool_size, wl_pool_size; | ||
548 | size_t fm_pos = 0, fm_size = ubi->fm_size; | ||
549 | unsigned long long max_sqnum = 0; | ||
550 | void *fm_raw = ubi->fm_buf; | ||
551 | |||
552 | INIT_LIST_HEAD(&used); | ||
553 | INIT_LIST_HEAD(&free); | ||
554 | INIT_LIST_HEAD(&eba_orphans); | ||
555 | INIT_LIST_HEAD(&ai->corr); | ||
556 | INIT_LIST_HEAD(&ai->free); | ||
557 | INIT_LIST_HEAD(&ai->erase); | ||
558 | INIT_LIST_HEAD(&ai->alien); | ||
559 | ai->volumes = RB_ROOT; | ||
560 | ai->min_ec = UBI_MAX_ERASECOUNTER; | ||
561 | |||
562 | ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab", | ||
563 | sizeof(struct ubi_ainf_peb), | ||
564 | 0, 0, NULL); | ||
565 | if (!ai->aeb_slab_cache) { | ||
566 | ret = -ENOMEM; | ||
567 | goto fail; | ||
568 | } | ||
569 | |||
570 | fmsb = (struct ubi_fm_sb *)(fm_raw); | ||
571 | ai->max_sqnum = fmsb->sqnum; | ||
572 | fm_pos += sizeof(struct ubi_fm_sb); | ||
573 | if (fm_pos >= fm_size) | ||
574 | goto fail_bad; | ||
575 | |||
576 | fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos); | ||
577 | fm_pos += sizeof(*fmhdr); | ||
578 | if (fm_pos >= fm_size) | ||
579 | goto fail_bad; | ||
580 | |||
581 | if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { | ||
582 | ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x", | ||
583 | be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); | ||
584 | goto fail_bad; | ||
585 | } | ||
586 | |||
587 | fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); | ||
588 | fm_pos += sizeof(*fmpl1); | ||
589 | if (fm_pos >= fm_size) | ||
590 | goto fail_bad; | ||
591 | if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { | ||
592 | ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", | ||
593 | be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); | ||
594 | goto fail_bad; | ||
595 | } | ||
596 | |||
597 | fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); | ||
598 | fm_pos += sizeof(*fmpl2); | ||
599 | if (fm_pos >= fm_size) | ||
600 | goto fail_bad; | ||
601 | if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { | ||
602 | ubi_err("bad fastmap WL pool magic: 0x%x, expected: 0x%x", | ||
603 | be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); | ||
604 | goto fail_bad; | ||
605 | } | ||
606 | |||
607 | pool_size = be16_to_cpu(fmpl1->size); | ||
608 | wl_pool_size = be16_to_cpu(fmpl2->size); | ||
609 | fm->max_pool_size = be16_to_cpu(fmpl1->max_size); | ||
610 | fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); | ||
611 | |||
612 | if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { | ||
613 | ubi_err("bad pool size: %i", pool_size); | ||
614 | goto fail_bad; | ||
615 | } | ||
616 | |||
617 | if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { | ||
618 | ubi_err("bad WL pool size: %i", wl_pool_size); | ||
619 | goto fail_bad; | ||
620 | } | ||
621 | | ||
623 | if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || | ||
624 | fm->max_pool_size < 0) { | ||
625 | ubi_err("bad maximal pool size: %i", fm->max_pool_size); | ||
626 | goto fail_bad; | ||
627 | } | ||
628 | |||
629 | if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || | ||
630 | fm->max_wl_pool_size < 0) { | ||
631 | ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); | ||
632 | goto fail_bad; | ||
633 | } | ||
634 | |||
635 | /* read EC values from free list */ | ||
636 | for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) { | ||
637 | fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
638 | fm_pos += sizeof(*fmec); | ||
639 | if (fm_pos >= fm_size) | ||
640 | goto fail_bad; | ||
641 | |||
642 | add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum), | ||
643 | be32_to_cpu(fmec->ec), 0); | ||
644 | } | ||
645 | |||
646 | /* read EC values from used list */ | ||
647 | for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) { | ||
648 | fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
649 | fm_pos += sizeof(*fmec); | ||
650 | if (fm_pos >= fm_size) | ||
651 | goto fail_bad; | ||
652 | |||
653 | add_aeb(ai, &used, be32_to_cpu(fmec->pnum), | ||
654 | be32_to_cpu(fmec->ec), 0); | ||
655 | } | ||
656 | |||
657 | /* read EC values from scrub list */ | ||
658 | for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) { | ||
659 | fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
660 | fm_pos += sizeof(*fmec); | ||
661 | if (fm_pos >= fm_size) | ||
662 | goto fail_bad; | ||
663 | |||
664 | add_aeb(ai, &used, be32_to_cpu(fmec->pnum), | ||
665 | be32_to_cpu(fmec->ec), 1); | ||
666 | } | ||
667 | |||
668 | /* read EC values from erase list */ | ||
669 | for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) { | ||
670 | fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
671 | fm_pos += sizeof(*fmec); | ||
672 | if (fm_pos >= fm_size) | ||
673 | goto fail_bad; | ||
674 | |||
675 | add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum), | ||
676 | be32_to_cpu(fmec->ec), 1); | ||
677 | } | ||
678 | |||
679 | ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); | ||
680 | ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count); | ||
681 | |||
682 | /* Iterate over all volumes and read their EBA table */ | ||
683 | for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) { | ||
684 | fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); | ||
685 | fm_pos += sizeof(*fmvhdr); | ||
686 | if (fm_pos >= fm_size) | ||
687 | goto fail_bad; | ||
688 | |||
689 | if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { | ||
690 | ubi_err("bad fastmap vol header magic: 0x%x, expected: 0x%x", | ||
691 | be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); | ||
693 | goto fail_bad; | ||
694 | } | ||
695 | |||
696 | av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id), | ||
697 | be32_to_cpu(fmvhdr->used_ebs), | ||
698 | be32_to_cpu(fmvhdr->data_pad), | ||
699 | fmvhdr->vol_type, | ||
700 | be32_to_cpu(fmvhdr->last_eb_bytes)); | ||
701 | |||
702 | if (!av) | ||
703 | goto fail_bad; | ||
704 | |||
705 | ai->vols_found++; | ||
706 | if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id)) | ||
707 | ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id); | ||
708 | |||
709 | fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos); | ||
710 | fm_pos += sizeof(*fm_eba); | ||
711 | fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs)); | ||
712 | if (fm_pos >= fm_size) | ||
713 | goto fail_bad; | ||
714 | |||
715 | if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { | ||
716 | ubi_err("bad fastmap EBA header magic: 0x%x, expected: 0x%x", | ||
717 | be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); | ||
719 | goto fail_bad; | ||
720 | } | ||
721 | |||
722 | for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) { | ||
723 | int pnum = be32_to_cpu(fm_eba->pnum[j]); | ||
724 | |||
725 | if (pnum < 0) | ||
726 | continue; | ||
727 | |||
728 | aeb = NULL; | ||
729 | list_for_each_entry(tmp_aeb, &used, u.list) { | ||
730 | if (tmp_aeb->pnum == pnum) | ||
731 | aeb = tmp_aeb; | ||
732 | } | ||
733 | |||
734 | /* | ||
735 | * This can happen if a PEB is already in an EBA known | ||
736 | * by this fastmap but the PEB itself is not in the used | ||
737 | * list. In this case the PEB can be within the fastmap | ||
738 | * pool or it was in the protection queue while the | ||
739 | * fastmap was being written. | ||
740 | */ | ||
741 | if (!aeb) { | ||
742 | aeb = kmem_cache_alloc(ai->aeb_slab_cache, | ||
743 | GFP_KERNEL); | ||
744 | if (!aeb) { | ||
745 | ret = -ENOMEM; | ||
746 | |||
747 | goto fail; | ||
748 | } | ||
749 | |||
750 | aeb->lnum = j; | ||
751 | aeb->pnum = pnum; | ||
752 | aeb->ec = -1; | ||
753 | aeb->scrub = aeb->copy_flag = aeb->sqnum = 0; | ||
754 | list_add_tail(&aeb->u.list, &eba_orphans); | ||
755 | continue; | ||
756 | } | ||
757 | |||
758 | aeb->lnum = j; | ||
759 | |||
760 | if (av->highest_lnum <= aeb->lnum) | ||
761 | av->highest_lnum = aeb->lnum; | ||
762 | |||
763 | assign_aeb_to_av(ai, aeb, av); | ||
764 | |||
765 | dbg_bld("inserting PEB:%i (LEB %i) to vol %i", | ||
766 | aeb->pnum, aeb->lnum, av->vol_id); | ||
767 | } | ||
768 | |||
769 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
770 | if (!ech) { | ||
771 | ret = -ENOMEM; | ||
772 | goto fail; | ||
773 | } | ||
774 | |||
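| /* The EBA orphans collected above are referenced by this | ||
|  * volume's EBA but were not on the used list, so their erase | ||
|  * counters are still unknown (ec == -1); read each EC header | ||
|  * from flash to recover them. | ||
|  */ | ||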
775 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, | ||
776 | u.list) { | ||
777 | int err; | ||
778 | |||
779 | if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { | ||
780 | ubi_err("bad PEB in fastmap EBA orphan list"); | ||
781 | ret = UBI_BAD_FASTMAP; | ||
782 | kfree(ech); | ||
783 | goto fail; | ||
784 | } | ||
785 | |||
786 | err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); | ||
787 | if (err && err != UBI_IO_BITFLIPS) { | ||
788 | ubi_err("unable to read EC header! PEB:%i err:%i", | ||
789 | tmp_aeb->pnum, err); | ||
790 | ret = err > 0 ? UBI_BAD_FASTMAP : err; | ||
791 | kfree(ech); | ||
792 | goto fail; | ||
794 | } else if (err == UBI_IO_BITFLIPS) | ||
795 | tmp_aeb->scrub = 1; | ||
796 | |||
797 | tmp_aeb->ec = be64_to_cpu(ech->ec); | ||
798 | assign_aeb_to_av(ai, tmp_aeb, av); | ||
799 | } | ||
800 | |||
801 | kfree(ech); | ||
802 | } | ||
803 | |||
804 | ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, | ||
805 | &eba_orphans, &free); | ||
806 | if (ret) | ||
807 | goto fail; | ||
808 | |||
809 | ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, | ||
810 | &eba_orphans, &free); | ||
811 | if (ret) | ||
812 | goto fail; | ||
813 | |||
814 | if (max_sqnum > ai->max_sqnum) | ||
815 | ai->max_sqnum = max_sqnum; | ||
816 | |||
817 | list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) { | ||
818 | list_del(&tmp_aeb->u.list); | ||
819 | list_add_tail(&tmp_aeb->u.list, &ai->free); | ||
820 | } | ||
821 | |||
822 | /* | ||
823 | * If fastmap is leaking PEBs (must not happen), raise a | ||
824 | * fat warning and fall back to scanning mode. | ||
825 | * We do this here because in ubi_wl_init() it's too late | ||
826 | * and we cannot fall back to scanning. | ||
827 | */ | ||
828 | if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count - | ||
829 | ai->bad_peb_count - fm->used_blocks)) | ||
830 | goto fail_bad; | ||
831 | |||
832 | return 0; | ||
833 | |||
834 | fail_bad: | ||
835 | ret = UBI_BAD_FASTMAP; | ||
836 | fail: | ||
837 | return ret; | ||
838 | } | ||
839 | |||
840 | /** | ||
841 | * ubi_scan_fastmap - scan the fastmap. | ||
842 | * @ubi: UBI device object | ||
843 | * @ai: UBI attach info to be filled | ||
844 | * @fm_anchor: The fastmap starts at this PEB | ||
845 | * | ||
846 | * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found, | ||
847 | * UBI_BAD_FASTMAP if one was found but is not usable. | ||
848 | * < 0 indicates an internal error. | ||
849 | */ | ||
850 | int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | ||
851 | int fm_anchor) | ||
852 | { | ||
853 | struct ubi_fm_sb *fmsb, *fmsb2; | ||
854 | struct ubi_vid_hdr *vh; | ||
855 | struct ubi_ec_hdr *ech; | ||
856 | struct ubi_fastmap_layout *fm; | ||
857 | int i, used_blocks, pnum, ret = 0; | ||
858 | size_t fm_size; | ||
859 | u32 crc, tmp_crc; | ||
860 | unsigned long long sqnum = 0; | ||
861 | |||
862 | mutex_lock(&ubi->fm_mutex); | ||
863 | memset(ubi->fm_buf, 0, ubi->fm_size); | ||
864 | |||
865 | fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL); | ||
866 | if (!fmsb) { | ||
867 | ret = -ENOMEM; | ||
868 | goto out; | ||
869 | } | ||
870 | |||
871 | fm = kzalloc(sizeof(*fm), GFP_KERNEL); | ||
872 | if (!fm) { | ||
873 | ret = -ENOMEM; | ||
874 | kfree(fmsb); | ||
875 | goto out; | ||
876 | } | ||
877 | |||
878 | ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb)); | ||
879 | if (ret && ret != UBI_IO_BITFLIPS) | ||
880 | goto free_fm_sb; | ||
881 | else if (ret == UBI_IO_BITFLIPS) | ||
882 | fm->to_be_tortured[0] = 1; | ||
883 | |||
884 | if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { | ||
885 | ubi_err("bad super block magic: 0x%x, expected: 0x%x", | ||
886 | be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); | ||
887 | ret = UBI_BAD_FASTMAP; | ||
888 | goto free_fm_sb; | ||
889 | } | ||
890 | |||
891 | if (fmsb->version != UBI_FM_FMT_VERSION) { | ||
892 | ubi_err("bad fastmap version: %i, expected: %i", | ||
893 | fmsb->version, UBI_FM_FMT_VERSION); | ||
894 | ret = UBI_BAD_FASTMAP; | ||
895 | goto free_fm_sb; | ||
896 | } | ||
897 | |||
898 | used_blocks = be32_to_cpu(fmsb->used_blocks); | ||
899 | if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { | ||
900 | ubi_err("number of fastmap blocks is invalid: %i", used_blocks); | ||
901 | ret = UBI_BAD_FASTMAP; | ||
902 | goto free_fm_sb; | ||
903 | } | ||
904 | |||
905 | fm_size = ubi->leb_size * used_blocks; | ||
906 | if (fm_size != ubi->fm_size) { | ||
907 | ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, | ||
908 | ubi->fm_size); | ||
909 | ret = UBI_BAD_FASTMAP; | ||
910 | goto free_fm_sb; | ||
911 | } | ||
912 | |||
913 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
914 | if (!ech) { | ||
915 | ret = -ENOMEM; | ||
916 | goto free_fm_sb; | ||
917 | } | ||
918 | |||
919 | vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); | ||
920 | if (!vh) { | ||
921 | ret = -ENOMEM; | ||
922 | goto free_hdr; | ||
923 | } | ||
924 | |||
925 | for (i = 0; i < used_blocks; i++) { | ||
926 | pnum = be32_to_cpu(fmsb->block_loc[i]); | ||
927 | |||
928 | if (ubi_io_is_bad(ubi, pnum)) { | ||
929 | ret = UBI_BAD_FASTMAP; | ||
930 | goto free_hdr; | ||
931 | } | ||
932 | |||
933 | ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); | ||
934 | if (ret && ret != UBI_IO_BITFLIPS) { | ||
935 | ubi_err("unable to read fastmap block# %i EC (PEB: %i)", | ||
936 | i, pnum); | ||
937 | if (ret > 0) | ||
938 | ret = UBI_BAD_FASTMAP; | ||
939 | goto free_hdr; | ||
940 | } else if (ret == UBI_IO_BITFLIPS) | ||
941 | fm->to_be_tortured[i] = 1; | ||
942 | |||
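| /* Adopt the image_seq of the first fastmap block if UBI does | ||
|  * not know one yet; all further blocks must match it. | ||
|  */ | ||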
943 | if (!ubi->image_seq) | ||
944 | ubi->image_seq = be32_to_cpu(ech->image_seq); | ||
945 | |||
946 | if (be32_to_cpu(ech->image_seq) != ubi->image_seq) { | ||
947 | ret = UBI_BAD_FASTMAP; | ||
948 | goto free_hdr; | ||
949 | } | ||
950 | |||
951 | ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); | ||
952 | if (ret && ret != UBI_IO_BITFLIPS) { | ||
953 | ubi_err("unable to read fastmap block# %i (PEB: %i)", | ||
954 | i, pnum); | ||
955 | goto free_hdr; | ||
956 | } | ||
957 | |||
958 | if (i == 0) { | ||
959 | if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { | ||
960 | ubi_err("bad fastmap anchor vol_id: 0x%x, expected: 0x%x", | ||
961 | be32_to_cpu(vh->vol_id), | ||
962 | UBI_FM_SB_VOLUME_ID); | ||
964 | ret = UBI_BAD_FASTMAP; | ||
965 | goto free_hdr; | ||
966 | } | ||
967 | } else { | ||
968 | if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { | ||
969 | ubi_err("bad fastmap data vol_id: 0x%x, expected: 0x%x", | ||
970 | be32_to_cpu(vh->vol_id), | ||
971 | UBI_FM_DATA_VOLUME_ID); | ||
973 | ret = UBI_BAD_FASTMAP; | ||
974 | goto free_hdr; | ||
975 | } | ||
976 | } | ||
977 | |||
978 | if (sqnum < be64_to_cpu(vh->sqnum)) | ||
979 | sqnum = be64_to_cpu(vh->sqnum); | ||
980 | |||
981 | ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, | ||
982 | ubi->leb_start, ubi->leb_size); | ||
983 | if (ret && ret != UBI_IO_BITFLIPS) { | ||
984 | ubi_err("unable to read fastmap block# %i (PEB: %i, err: %i)", | ||
985 | i, pnum, ret); | ||
986 | goto free_hdr; | ||
987 | } | ||
988 | } | ||
989 | |||
990 | kfree(fmsb); | ||
991 | fmsb = NULL; | ||
992 | |||
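| /* The CRC stored on flash was computed with data_crc zeroed | ||
|  * (see ubi_write_fastmap()), so clear the field here before | ||
|  * checksumming the buffer. | ||
|  */ | ||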
993 | fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf); | ||
994 | tmp_crc = be32_to_cpu(fmsb2->data_crc); | ||
995 | fmsb2->data_crc = 0; | ||
996 | crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); | ||
997 | if (crc != tmp_crc) { | ||
998 | ubi_err("fastmap data CRC is invalid"); | ||
999 | ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); | ||
1000 | ret = UBI_BAD_FASTMAP; | ||
1001 | goto free_hdr; | ||
1002 | } | ||
1003 | |||
1004 | fmsb2->sqnum = sqnum; | ||
1005 | |||
1006 | fm->used_blocks = used_blocks; | ||
1007 | |||
1008 | ret = ubi_attach_fastmap(ubi, ai, fm); | ||
1009 | if (ret) { | ||
1010 | if (ret > 0) | ||
1011 | ret = UBI_BAD_FASTMAP; | ||
1012 | goto free_hdr; | ||
1013 | } | ||
1014 | |||
1015 | for (i = 0; i < used_blocks; i++) { | ||
1016 | struct ubi_wl_entry *e; | ||
1017 | |||
1018 | e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); | ||
1019 | if (!e) { | ||
1020 | while (i--) | ||
1021 | kmem_cache_free(ubi_wl_entry_slab, fm->e[i]); | ||
1022 | |||
1023 | ret = -ENOMEM; | ||
1024 | goto free_hdr; | ||
1025 | } | ||
1026 | |||
1027 | e->pnum = be32_to_cpu(fmsb2->block_loc[i]); | ||
1028 | e->ec = be32_to_cpu(fmsb2->block_ec[i]); | ||
1029 | fm->e[i] = e; | ||
1030 | } | ||
1031 | |||
1032 | ubi->fm = fm; | ||
1033 | ubi->fm_pool.max_size = ubi->fm->max_pool_size; | ||
1034 | ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; | ||
1035 | ubi_msg("attached by fastmap"); | ||
1036 | ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size); | ||
1037 | ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); | ||
1038 | ubi->fm_disabled = 0; | ||
1039 | |||
1040 | ubi_free_vid_hdr(ubi, vh); | ||
1041 | kfree(ech); | ||
1042 | out: | ||
1043 | mutex_unlock(&ubi->fm_mutex); | ||
1044 | if (ret == UBI_BAD_FASTMAP) | ||
1045 | ubi_err("Attach by fastmap failed, doing a full scan!"); | ||
1046 | return ret; | ||
1047 | |||
1048 | free_hdr: | ||
1049 | ubi_free_vid_hdr(ubi, vh); | ||
1050 | kfree(ech); | ||
1051 | free_fm_sb: | ||
1052 | kfree(fmsb); | ||
1053 | kfree(fm); | ||
1054 | goto out; | ||
1055 | } | ||
1056 | |||
1057 | /** | ||
1058 | * ubi_write_fastmap - writes a fastmap. | ||
1059 | * @ubi: UBI device object | ||
1060 | * @new_fm: the fastmap to be written | ||
1061 | * | ||
1062 | * Returns 0 on success, < 0 indicates an internal error. | ||
1063 | */ | ||
1064 | static int ubi_write_fastmap(struct ubi_device *ubi, | ||
1065 | struct ubi_fastmap_layout *new_fm) | ||
1066 | { | ||
1067 | size_t fm_pos = 0; | ||
1068 | void *fm_raw; | ||
1069 | struct ubi_fm_sb *fmsb; | ||
1070 | struct ubi_fm_hdr *fmh; | ||
1071 | struct ubi_fm_scan_pool *fmpl1, *fmpl2; | ||
1072 | struct ubi_fm_ec *fec; | ||
1073 | struct ubi_fm_volhdr *fvh; | ||
1074 | struct ubi_fm_eba *feba; | ||
1075 | struct rb_node *node; | ||
1076 | struct ubi_wl_entry *wl_e; | ||
1077 | struct ubi_volume *vol; | ||
1078 | struct ubi_vid_hdr *avhdr, *dvhdr; | ||
1079 | struct ubi_work *ubi_wrk; | ||
1080 | int ret, i, j, free_peb_count, used_peb_count, vol_count; | ||
1081 | int scrub_peb_count, erase_peb_count; | ||
1082 | |||
1083 | fm_raw = ubi->fm_buf; | ||
1084 | memset(ubi->fm_buf, 0, ubi->fm_size); | ||
1085 | |||
1086 | avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); | ||
1087 | if (!avhdr) { | ||
1088 | ret = -ENOMEM; | ||
1089 | goto out; | ||
1090 | } | ||
1091 | |||
1092 | dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID); | ||
1093 | if (!dvhdr) { | ||
1094 | ret = -ENOMEM; | ||
1095 | goto out_kfree; | ||
1096 | } | ||
1097 | |||
1098 | spin_lock(&ubi->volumes_lock); | ||
1099 | spin_lock(&ubi->wl_lock); | ||
1100 | |||
1101 | fmsb = (struct ubi_fm_sb *)fm_raw; | ||
1102 | fm_pos += sizeof(*fmsb); | ||
1103 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1104 | |||
1105 | fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos); | ||
1106 | fm_pos += sizeof(*fmh); | ||
1107 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1108 | |||
1109 | fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC); | ||
1110 | fmsb->version = UBI_FM_FMT_VERSION; | ||
1111 | fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks); | ||
1112 | /* the max sqnum will be filled in while *reading* the fastmap */ | ||
1113 | fmsb->sqnum = 0; | ||
1114 | |||
1115 | fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC); | ||
1116 | free_peb_count = 0; | ||
1117 | used_peb_count = 0; | ||
1118 | scrub_peb_count = 0; | ||
1119 | erase_peb_count = 0; | ||
1120 | vol_count = 0; | ||
1121 | |||
1122 | fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); | ||
1123 | fm_pos += sizeof(*fmpl1); | ||
1124 | fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC); | ||
1125 | fmpl1->size = cpu_to_be16(ubi->fm_pool.size); | ||
1126 | fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size); | ||
1127 | |||
1128 | for (i = 0; i < ubi->fm_pool.size; i++) | ||
1129 | fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]); | ||
1130 | |||
1131 | fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos); | ||
1132 | fm_pos += sizeof(*fmpl2); | ||
1133 | fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC); | ||
1134 | fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size); | ||
1135 | fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size); | ||
1136 | |||
1137 | for (i = 0; i < ubi->fm_wl_pool.size; i++) | ||
1138 | fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]); | ||
1139 | |||
1140 | for (node = rb_first(&ubi->free); node; node = rb_next(node)) { | ||
1141 | wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); | ||
1142 | fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
1143 | |||
1144 | fec->pnum = cpu_to_be32(wl_e->pnum); | ||
1145 | fec->ec = cpu_to_be32(wl_e->ec); | ||
1146 | |||
1147 | free_peb_count++; | ||
1148 | fm_pos += sizeof(*fec); | ||
1149 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1150 | } | ||
1151 | fmh->free_peb_count = cpu_to_be32(free_peb_count); | ||
1152 | |||
1153 | for (node = rb_first(&ubi->used); node; node = rb_next(node)) { | ||
1154 | wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); | ||
1155 | fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
1156 | |||
1157 | fec->pnum = cpu_to_be32(wl_e->pnum); | ||
1158 | fec->ec = cpu_to_be32(wl_e->ec); | ||
1159 | |||
1160 | used_peb_count++; | ||
1161 | fm_pos += sizeof(*fec); | ||
1162 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1163 | } | ||
1164 | fmh->used_peb_count = cpu_to_be32(used_peb_count); | ||
1165 | |||
1166 | for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) { | ||
1167 | wl_e = rb_entry(node, struct ubi_wl_entry, u.rb); | ||
1168 | fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
1169 | |||
1170 | fec->pnum = cpu_to_be32(wl_e->pnum); | ||
1171 | fec->ec = cpu_to_be32(wl_e->ec); | ||
1172 | |||
1173 | scrub_peb_count++; | ||
1174 | fm_pos += sizeof(*fec); | ||
1175 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1176 | } | ||
1177 | fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count); | ||
1178 | | ||
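| /* PEBs with a pending erase work are recorded in the erase | ||
|  * list so that the next attach still knows their erase counters. | ||
|  */ | ||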
1180 | list_for_each_entry(ubi_wrk, &ubi->works, list) { | ||
1181 | if (ubi_is_erase_work(ubi_wrk)) { | ||
1182 | wl_e = ubi_wrk->e; | ||
1183 | ubi_assert(wl_e); | ||
1184 | |||
1185 | fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); | ||
1186 | |||
1187 | fec->pnum = cpu_to_be32(wl_e->pnum); | ||
1188 | fec->ec = cpu_to_be32(wl_e->ec); | ||
1189 | |||
1190 | erase_peb_count++; | ||
1191 | fm_pos += sizeof(*fec); | ||
1192 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1193 | } | ||
1194 | } | ||
1195 | fmh->erase_peb_count = cpu_to_be32(erase_peb_count); | ||
1196 | |||
1197 | for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) { | ||
1198 | vol = ubi->volumes[i]; | ||
1199 | |||
1200 | if (!vol) | ||
1201 | continue; | ||
1202 | |||
1203 | vol_count++; | ||
1204 | |||
1205 | fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos); | ||
1206 | fm_pos += sizeof(*fvh); | ||
1207 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1208 | |||
1209 | fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC); | ||
1210 | fvh->vol_id = cpu_to_be32(vol->vol_id); | ||
1211 | fvh->vol_type = vol->vol_type; | ||
1212 | fvh->used_ebs = cpu_to_be32(vol->used_ebs); | ||
1213 | fvh->data_pad = cpu_to_be32(vol->data_pad); | ||
1214 | fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes); | ||
1215 | |||
1216 | ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME || | ||
1217 | vol->vol_type == UBI_STATIC_VOLUME); | ||
1218 | |||
1219 | feba = (struct ubi_fm_eba *)(fm_raw + fm_pos); | ||
1220 | fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs); | ||
1221 | ubi_assert(fm_pos <= ubi->fm_size); | ||
1222 | |||
1223 | for (j = 0; j < vol->reserved_pebs; j++) | ||
1224 | feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]); | ||
1225 | |||
1226 | feba->reserved_pebs = cpu_to_be32(j); | ||
1227 | feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC); | ||
1228 | } | ||
1229 | fmh->vol_count = cpu_to_be32(vol_count); | ||
1230 | fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count); | ||
1231 | |||
1232 | avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | ||
1233 | avhdr->lnum = 0; | ||
1234 | |||
1235 | spin_unlock(&ubi->wl_lock); | ||
1236 | spin_unlock(&ubi->volumes_lock); | ||
1237 | |||
1238 | dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); | ||
1239 | ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); | ||
1240 | if (ret) { | ||
1241 | ubi_err("unable to write vid_hdr to fastmap SB!"); | ||
1242 | goto out_kfree; | ||
1243 | } | ||
1244 | |||
1245 | for (i = 0; i < new_fm->used_blocks; i++) { | ||
1246 | fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum); | ||
1247 | fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec); | ||
1248 | } | ||
1249 | |||
1250 | fmsb->data_crc = 0; | ||
1251 | fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw, | ||
1252 | ubi->fm_size)); | ||
1253 | |||
1254 | for (i = 1; i < new_fm->used_blocks; i++) { | ||
1255 | dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | ||
1256 | dvhdr->lnum = cpu_to_be32(i); | ||
1257 | dbg_bld("writing fastmap data to PEB %i sqnum %llu", | ||
1258 | new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); | ||
1259 | ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); | ||
1260 | if (ret) { | ||
1261 | ubi_err("unable to write vid_hdr to PEB %i!", | ||
1262 | new_fm->e[i]->pnum); | ||
1263 | goto out_kfree; | ||
1264 | } | ||
1265 | } | ||
1266 | |||
1267 | for (i = 0; i < new_fm->used_blocks; i++) { | ||
1268 | ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size), | ||
1269 | new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size); | ||
1270 | if (ret) { | ||
1271 | ubi_err("unable to write fastmap to PEB %i!", | ||
1272 | new_fm->e[i]->pnum); | ||
1273 | goto out_kfree; | ||
1274 | } | ||
1275 | } | ||
1276 | |||
1277 | ubi_assert(new_fm); | ||
1278 | ubi->fm = new_fm; | ||
1279 | |||
1280 | dbg_bld("fastmap written!"); | ||
1281 | |||
1282 | out_kfree: | ||
1283 | ubi_free_vid_hdr(ubi, avhdr); | ||
1284 | ubi_free_vid_hdr(ubi, dvhdr); | ||
1285 | out: | ||
1286 | return ret; | ||
1287 | } | ||
1288 | |||
1289 | /** | ||
1290 | * erase_block - Manually erase a PEB. | ||
1291 | * @ubi: UBI device object | ||
1292 | * @pnum: PEB to be erased | ||
1293 | * | ||
1294 | * Returns the new EC value on success, < 0 indicates an internal error. | ||
1295 | */ | ||
1296 | static int erase_block(struct ubi_device *ubi, int pnum) | ||
1297 | { | ||
1298 | int ret; | ||
1299 | struct ubi_ec_hdr *ec_hdr; | ||
1300 | long long ec; | ||
1301 | |||
1302 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | ||
1303 | if (!ec_hdr) | ||
1304 | return -ENOMEM; | ||
1305 | |||
1306 | ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); | ||
1307 | if (ret < 0) | ||
1308 | goto out; | ||
1309 | else if (ret && ret != UBI_IO_BITFLIPS) { | ||
1310 | ret = -EINVAL; | ||
1311 | goto out; | ||
1312 | } | ||
1313 | |||
1314 | ret = ubi_io_sync_erase(ubi, pnum, 0); | ||
1315 | if (ret < 0) | ||
1316 | goto out; | ||
1317 | |||
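| /* ubi_io_sync_erase() returns the number of erasures performed | ||
|  * on success, so account for them in the erase counter. | ||
|  */ | ||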
1318 | ec = be64_to_cpu(ec_hdr->ec); | ||
1319 | ec += ret; | ||
1320 | if (ec > UBI_MAX_ERASECOUNTER) { | ||
1321 | ret = -EINVAL; | ||
1322 | goto out; | ||
1323 | } | ||
1324 | |||
1325 | ec_hdr->ec = cpu_to_be64(ec); | ||
1326 | ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); | ||
1327 | if (ret < 0) | ||
1328 | goto out; | ||
1329 | |||
1330 | ret = ec; | ||
1331 | out: | ||
1332 | kfree(ec_hdr); | ||
1333 | return ret; | ||
1334 | } | ||
1335 | |||
1336 | /** | ||
1337 | * invalidate_fastmap - destroys a fastmap. | ||
1338 | * @ubi: UBI device object | ||
1339 | * @fm: the fastmap to be destroyed | ||
1340 | * | ||
1341 | * Returns 0 on success, < 0 indicates an internal error. | ||
1342 | */ | ||
1343 | static int invalidate_fastmap(struct ubi_device *ubi, | ||
1344 | struct ubi_fastmap_layout *fm) | ||
1345 | { | ||
1346 | int ret, i; | ||
1347 | struct ubi_vid_hdr *vh; | ||
1348 | |||
1349 | ret = erase_block(ubi, fm->e[0]->pnum); | ||
1350 | if (ret < 0) | ||
1351 | return ret; | ||
1352 | |||
1353 | vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID); | ||
1354 | if (!vh) | ||
1355 | return -ENOMEM; | ||
1356 | |||
1357 | /* | ||
1358 | * Deleting the current fastmap SB is not enough, an old SB may exist, | ||
1359 | * so create a (corrupted) SB such that fastmap will find it and fall | ||
1360 | * back to scanning mode in any case. | ||
1361 | */ | ||
1360 | vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | ||
1361 | ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh); | ||
1362 | |||
1363 | for (i = 0; i < fm->used_blocks; i++) | ||
1364 | ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]); | ||
1365 | |||
1366 | return ret; | ||
1367 | } | ||
1368 | |||
1369 | /** | ||
1370 | * ubi_update_fastmap - will be called by UBI if a volume changes or | ||
1371 | * a fastmap pool becomes full. | ||
1372 | * @ubi: UBI device object | ||
1373 | * | ||
1374 | * Returns 0 on success, < 0 indicates an internal error. | ||
1375 | */ | ||
1376 | int ubi_update_fastmap(struct ubi_device *ubi) | ||
1377 | { | ||
1378 | int ret, i; | ||
1379 | struct ubi_fastmap_layout *new_fm, *old_fm; | ||
1380 | struct ubi_wl_entry *tmp_e; | ||
1381 | |||
1382 | mutex_lock(&ubi->fm_mutex); | ||
1383 | |||
1384 | ubi_refill_pools(ubi); | ||
1385 | |||
1386 | if (ubi->ro_mode || ubi->fm_disabled) { | ||
1387 | mutex_unlock(&ubi->fm_mutex); | ||
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1391 | ret = ubi_ensure_anchor_pebs(ubi); | ||
1392 | if (ret) { | ||
1393 | mutex_unlock(&ubi->fm_mutex); | ||
1394 | return ret; | ||
1395 | } | ||
1396 | |||
1397 | new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); | ||
1398 | if (!new_fm) { | ||
1399 | mutex_unlock(&ubi->fm_mutex); | ||
1400 | return -ENOMEM; | ||
1401 | } | ||
1402 | |||
1403 | new_fm->used_blocks = ubi->fm_size / ubi->leb_size; | ||
1404 | | ||
1405 | if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) { | ||
1406 | ubi_err("fastmap too large"); | ||
1407 | kfree(new_fm); | ||
1408 | mutex_unlock(&ubi->fm_mutex); | ||
1409 | return -ENOSPC; | ||
1410 | } | ||
1411 | | ||
1412 | for (i = 0; i < new_fm->used_blocks; i++) { | ||
1413 | new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); | ||
1414 | if (!new_fm->e[i]) { | ||
1415 | while (i--) | ||
1416 | kmem_cache_free(ubi_wl_entry_slab, new_fm->e[i]); | ||
1417 | | ||
1418 | kfree(new_fm); | ||
1419 | mutex_unlock(&ubi->fm_mutex); | ||
1420 | return -ENOMEM; | ||
1421 | } | ||
1422 | } | ||
1423 | | ||
1424 | old_fm = ubi->fm; | ||
1425 | ubi->fm = NULL; | ||
1425 | |||
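| /* e[0] is the anchor PEB; it is handled separately below via | ||
|  * ubi_wl_get_fm_peb(ubi, 1), so collecting starts at index 1. | ||
|  */ | ||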
1426 | for (i = 1; i < new_fm->used_blocks; i++) { | ||
1427 | spin_lock(&ubi->wl_lock); | ||
1428 | tmp_e = ubi_wl_get_fm_peb(ubi, 0); | ||
1429 | spin_unlock(&ubi->wl_lock); | ||
1430 | |||
1431 | if (!tmp_e && !old_fm) { | ||
1432 | int j; | ||
1433 | ubi_err("could not get any free erase block"); | ||
1434 | |||
1435 | for (j = 1; j < i; j++) | ||
1436 | ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); | ||
1437 | |||
1438 | ret = -ENOSPC; | ||
1439 | goto err; | ||
1440 | } else if (!tmp_e && old_fm) { | ||
1441 | ret = erase_block(ubi, old_fm->e[i]->pnum); | ||
1442 | if (ret < 0) { | ||
1443 | int j; | ||
1444 | |||
1445 | for (j = 1; j < i; j++) | ||
1446 | ubi_wl_put_fm_peb(ubi, new_fm->e[j], | ||
1447 | j, 0); | ||
1448 | |||
1449 | ubi_err("could not erase old fastmap PEB"); | ||
1450 | goto err; | ||
1451 | } | ||
1452 | |||
1453 | new_fm->e[i]->pnum = old_fm->e[i]->pnum; | ||
1454 | new_fm->e[i]->ec = old_fm->e[i]->ec; | ||
1455 | } else { | ||
1456 | new_fm->e[i]->pnum = tmp_e->pnum; | ||
1457 | new_fm->e[i]->ec = tmp_e->ec; | ||
1458 | |||
1459 | if (old_fm) | ||
1460 | ubi_wl_put_fm_peb(ubi, old_fm->e[i], i, | ||
1461 | old_fm->to_be_tortured[i]); | ||
1462 | } | ||
1463 | } | ||
1464 | |||
1465 | spin_lock(&ubi->wl_lock); | ||
1466 | tmp_e = ubi_wl_get_fm_peb(ubi, 1); | ||
1467 | spin_unlock(&ubi->wl_lock); | ||
1468 | |||
1469 | if (old_fm) { | ||
1470 | /* no fresh anchor PEB was found, reuse the old one */ | ||
1471 | if (!tmp_e) { | ||
1472 | ret = erase_block(ubi, old_fm->e[0]->pnum); | ||
1473 | if (ret < 0) { | ||
1474 | int i; | ||
1475 | ubi_err("could not erase old anchor PEB"); | ||
1476 | |||
1477 | for (i = 1; i < new_fm->used_blocks; i++) | ||
1478 | ubi_wl_put_fm_peb(ubi, new_fm->e[i], | ||
1479 | i, 0); | ||
1480 | goto err; | ||
1481 | } | ||
1482 | |||
1483 | new_fm->e[0]->pnum = old_fm->e[0]->pnum; | ||
1484 | new_fm->e[0]->ec = ret; | ||
1485 | } else { | ||
1486 | /* we've got a new anchor PEB, return the old one */ | ||
1487 | ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0, | ||
1488 | old_fm->to_be_tortured[0]); | ||
1489 | |||
1490 | new_fm->e[0]->pnum = tmp_e->pnum; | ||
1491 | new_fm->e[0]->ec = tmp_e->ec; | ||
1492 | } | ||
1493 | } else { | ||
1494 | if (!tmp_e) { | ||
1495 | int i; | ||
1496 | ubi_err("could not find any anchor PEB"); | ||
1497 | |||
1498 | for (i = 1; i < new_fm->used_blocks; i++) | ||
1499 | ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); | ||
1500 | |||
1501 | ret = -ENOSPC; | ||
1502 | goto err; | ||
1503 | } | ||
1504 | |||
1505 | new_fm->e[0]->pnum = tmp_e->pnum; | ||
1506 | new_fm->e[0]->ec = tmp_e->ec; | ||
1507 | } | ||
1508 | |||
1509 | down_write(&ubi->work_sem); | ||
1510 | down_write(&ubi->fm_sem); | ||
1511 | ret = ubi_write_fastmap(ubi, new_fm); | ||
1512 | up_write(&ubi->fm_sem); | ||
1513 | up_write(&ubi->work_sem); | ||
1514 | |||
1515 | if (ret) | ||
1516 | goto err; | ||
1517 | |||
1518 | out_unlock: | ||
1519 | mutex_unlock(&ubi->fm_mutex); | ||
1520 | kfree(old_fm); | ||
1521 | return ret; | ||
1522 | |||
1523 | err: | ||
1524 | kfree(new_fm); | ||
1525 | |||
1526 | ubi_warn("Unable to write new fastmap, err=%i", ret); | ||
1527 | |||
1528 | ret = 0; | ||
1529 | if (old_fm) { | ||
1530 | ret = invalidate_fastmap(ubi, old_fm); | ||
1531 | if (ret < 0) | ||
1532 | ubi_err("Unable to invalidate current fastmap!"); | ||
1533 | else if (ret) | ||
1534 | ret = 0; | ||
1535 | } | ||
1536 | goto out_unlock; | ||
1537 | } | ||
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index 468ffbc0eabd..ac2b24d1783d 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h | |||
@@ -375,4 +375,141 @@ struct ubi_vtbl_record { | |||
375 | __be32 crc; | 375 | __be32 crc; |
376 | } __packed; | 376 | } __packed; |
377 | 377 | ||
378 | /* UBI fastmap on-flash data structures */ | ||
379 | |||
380 | #define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1) | ||
381 | #define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2) | ||
382 | |||
383 | /* fastmap on-flash data structure format version */ | ||
384 | #define UBI_FM_FMT_VERSION 1 | ||
385 | |||
386 | #define UBI_FM_SB_MAGIC 0x7B11D69F | ||
387 | #define UBI_FM_HDR_MAGIC 0xD4B82EF7 | ||
388 | #define UBI_FM_VHDR_MAGIC 0xFA370ED1 | ||
389 | #define UBI_FM_POOL_MAGIC 0x67AF4D08 | ||
390 | #define UBI_FM_EBA_MAGIC 0xf0c040a8 | ||
391 | |||
392 | /* A fastmap super block can be located between PEB 0 and | ||
393 | * UBI_FM_MAX_START */ | ||
394 | #define UBI_FM_MAX_START 64 | ||
395 | |||
396 | /* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */ | ||
397 | #define UBI_FM_MAX_BLOCKS 32 | ||
398 | |||
399 | /* 5% of the total number of PEBs have to be scanned while attaching | ||
400 | * from a fastmap. | ||
401 | * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and | ||
402 | * UBI_FM_MAX_POOL_SIZE */ | ||
403 | #define UBI_FM_MIN_POOL_SIZE 8 | ||
404 | #define UBI_FM_MAX_POOL_SIZE 256 | ||
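| /* E.g. a device with 1024 PEBs gets a pool of roughly 51 PEBs (5%), | ||
|  * which lies within the [8, 256] clamp range defined above. | ||
|  */ | ||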
405 | |||
406 | #define UBI_FM_WL_POOL_SIZE 25 | ||
407 | |||
408 | /** | ||
409 | * struct ubi_fm_sb - UBI fastmap super block | ||
410 | * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC) | ||
411 | * @version: format version of this fastmap | ||
412 | * @data_crc: CRC over the fastmap data | ||
413 | * @used_blocks: number of PEBs used by this fastmap | ||
414 | * @block_loc: an array containing the location of all PEBs of the fastmap | ||
415 | * @block_ec: the erase counter of each used PEB | ||
416 | * @sqnum: highest sequence number value at the time the fastmap was taken | ||
417 | */ | ||
419 | struct ubi_fm_sb { | ||
420 | __be32 magic; | ||
421 | __u8 version; | ||
422 | __u8 padding1[3]; | ||
423 | __be32 data_crc; | ||
424 | __be32 used_blocks; | ||
425 | __be32 block_loc[UBI_FM_MAX_BLOCKS]; | ||
426 | __be32 block_ec[UBI_FM_MAX_BLOCKS]; | ||
427 | __be64 sqnum; | ||
428 | __u8 padding2[32]; | ||
429 | } __packed; | ||
430 | |||
431 | /** | ||
432 | * struct ubi_fm_hdr - header of the fastmap data set | ||
433 | * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC) | ||
434 | * @free_peb_count: number of free PEBs known by this fastmap | ||
435 | * @used_peb_count: number of used PEBs known by this fastmap | ||
436 | * @scrub_peb_count: number of to be scrubbed PEBs known by this fastmap | ||
437 | * @bad_peb_count: number of bad PEBs known by this fastmap | ||
438 | * @erase_peb_count: number of PEBs which have to be erased | ||
439 | * @vol_count: number of UBI volumes known by this fastmap | ||
440 | */ | ||
441 | struct ubi_fm_hdr { | ||
442 | __be32 magic; | ||
443 | __be32 free_peb_count; | ||
444 | __be32 used_peb_count; | ||
445 | __be32 scrub_peb_count; | ||
446 | __be32 bad_peb_count; | ||
447 | __be32 erase_peb_count; | ||
448 | __be32 vol_count; | ||
449 | __u8 padding[4]; | ||
450 | } __packed; | ||
451 | |||
452 | /* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */ | ||
453 | |||
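| /* | ||
|  * Informal sketch of the complete on-flash layout, as assembled | ||
|  * by ubi_write_fastmap() in fastmap.c: | ||
|  * | ||
|  *   ubi_fm_sb | ||
|  *   ubi_fm_hdr | ||
|  *   ubi_fm_scan_pool                 (user pool) | ||
|  *   ubi_fm_scan_pool                 (WL pool) | ||
|  *   ubi_fm_ec * (free + used + scrub + erase PEB counts) | ||
|  *   { ubi_fm_volhdr, ubi_fm_eba } * vol_count | ||
|  */ | ||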
454 | /** | ||
455 | * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching | ||
456 | * @magic: pool magic number (%UBI_FM_POOL_MAGIC) | ||
457 | * @size: current pool size | ||
458 | * @max_size: maximal pool size | ||
459 | * @pebs: an array containing the location of all PEBs in this pool | ||
460 | */ | ||
461 | struct ubi_fm_scan_pool { | ||
462 | __be32 magic; | ||
463 | __be16 size; | ||
464 | __be16 max_size; | ||
465 | __be32 pebs[UBI_FM_MAX_POOL_SIZE]; | ||
466 | __be32 padding[4]; | ||
467 | } __packed; | ||
468 | |||
469 | /* the second ubi_fm_scan_pool is followed by the free, used, scrub and erase struct ubi_fm_ec records */ | ||
470 | |||
471 | /** | ||
472 | * struct ubi_fm_ec - stores the erase counter of a PEB | ||
473 | * @pnum: PEB number | ||
474 | * @ec: erase counter of this PEB | ||
475 | */ | ||
476 | struct ubi_fm_ec { | ||
477 | __be32 pnum; | ||
478 | __be32 ec; | ||
479 | } __packed; | ||
480 | |||
481 | /** | ||
482 | * struct ubi_fm_volhdr - Fastmap volume header | ||
483 | * It identifies the start of an EBA table. | ||
484 | * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC) | ||
485 | * @vol_id: volume id of the fastmapped volume | ||
486 | * @vol_type: type of the fastmapped volume | ||
487 | * @data_pad: data_pad value of the fastmapped volume | ||
488 | * @used_ebs: number of used LEBs within this volume | ||
489 | * @last_eb_bytes: number of bytes used in the last LEB | ||
490 | */ | ||
491 | struct ubi_fm_volhdr { | ||
492 | __be32 magic; | ||
493 | __be32 vol_id; | ||
494 | __u8 vol_type; | ||
495 | __u8 padding1[3]; | ||
496 | __be32 data_pad; | ||
497 | __be32 used_ebs; | ||
498 | __be32 last_eb_bytes; | ||
499 | __u8 padding2[8]; | ||
500 | } __packed; | ||
501 | |||
502 | /* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */ | ||
503 | |||
504 | /** | ||
505 | * struct ubi_fm_eba - denotes an association between a PEB and a LEB | ||
506 | * @magic: EBA table magic number | ||
507 | * @reserved_pebs: number of table entries | ||
508 | * @pnum: PEB number for each LEB (the LEB number is the array index) | ||
509 | */ | ||
510 | struct ubi_fm_eba { | ||
511 | __be32 magic; | ||
512 | __be32 reserved_pebs; | ||
513 | __be32 pnum[0]; | ||
514 | } __packed; | ||
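| /* | ||
|  * The on-flash size of one such record is therefore | ||
|  * sizeof(struct ubi_fm_eba) + reserved_pebs * sizeof(__be32), | ||
|  * which is how ubi_attach_fastmap() advances past it. | ||
|  */ | ||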
378 | #endif /* !__UBI_MEDIA_H__ */ | 515 | #endif /* !__UBI_MEDIA_H__ */ |
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 383ee43d2425..7d57469723cf 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -133,6 +133,17 @@ enum { | |||
133 | MOVE_RETRY, | 133 | MOVE_RETRY, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | /* | ||
137 | * Return codes of the fastmap sub-system | ||
138 | * | ||
139 | * UBI_NO_FASTMAP: No fastmap super block was found | ||
140 | * UBI_BAD_FASTMAP: A fastmap was found but it's unusable | ||
141 | */ | ||
142 | enum { | ||
143 | UBI_NO_FASTMAP = 1, | ||
144 | UBI_BAD_FASTMAP, | ||
145 | }; | ||
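| /* | ||
|  * Both codes are positive on purpose so that callers can tell them | ||
|  * apart from negative errno values; see the | ||
|  * "err > 0 ? UBI_BAD_FASTMAP : err" pattern in fastmap.c. | ||
|  */ | ||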
146 | |||
136 | /** | 147 | /** |
137 | * struct ubi_wl_entry - wear-leveling entry. | 148 | * struct ubi_wl_entry - wear-leveling entry. |
138 | * @u.rb: link in the corresponding (free/used) RB-tree | 149 | * @u.rb: link in the corresponding (free/used) RB-tree |
@@ -199,6 +210,41 @@ struct ubi_rename_entry { | |||
199 | struct ubi_volume_desc; | 210 | struct ubi_volume_desc; |
200 | 211 | ||
201 | /** | 212 | /** |
213 | * struct ubi_fastmap_layout - in-memory fastmap data structure. | ||
214 | * @e: PEBs used by the current fastmap | ||
215 | * @to_be_tortured: if non-zero, torture this PEB | ||
216 | * @used_blocks: number of used PEBs | ||
217 | * @max_pool_size: maximal size of the user pool | ||
218 | * @max_wl_pool_size: maximal size of the pool used by the WL sub-system | ||
219 | */ | ||
220 | struct ubi_fastmap_layout { | ||
221 | struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS]; | ||
222 | int to_be_tortured[UBI_FM_MAX_BLOCKS]; | ||
223 | int used_blocks; | ||
224 | int max_pool_size; | ||
225 | int max_wl_pool_size; | ||
226 | }; | ||
227 | |||
228 | /** | ||
229 | * struct ubi_fm_pool - in-memory fastmap pool | ||
230 | * @pebs: PEBs in this pool | ||
231 | * @used: number of used PEBs | ||
232 | * @size: total number of PEBs in this pool | ||
233 | * @max_size: maximal size of the pool | ||
234 | * | ||
235 | * A pool gets filled with up to max_size PEBs. | ||
236 | * If all PEBs within the pool are used, a new fastmap is written | ||
237 | * to the flash and the pool gets refilled with empty PEBs. | ||
239 | */ | ||
240 | struct ubi_fm_pool { | ||
241 | int pebs[UBI_FM_MAX_POOL_SIZE]; | ||
242 | int used; | ||
243 | int size; | ||
244 | int max_size; | ||
245 | }; | ||
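| /* | ||
|  * Informal sketch (not the actual wl.c code) of how such a pool is | ||
|  * consumed; pool_get_peb() is a made-up name for illustration: | ||
|  * | ||
|  *   static int pool_get_peb(struct ubi_device *ubi, | ||
|  *                           struct ubi_fm_pool *pool) | ||
|  *   { | ||
|  *           if (pool->used == pool->size) | ||
|  *                   ubi_update_fastmap(ubi);  // also refills the pool | ||
|  *           return pool->pebs[pool->used++]; | ||
|  *   } | ||
|  */ | ||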
246 | |||
247 | /** | ||
202 | * struct ubi_volume - UBI volume description data structure. | 248 | * struct ubi_volume - UBI volume description data structure. |
203 | * @dev: device object to make use of the the Linux device model | 249 | * @dev: device object to make use of the the Linux device model |
204 | * @cdev: character device object to create character device | 250 | * @cdev: character device object to create character device |
@@ -333,9 +379,21 @@ struct ubi_wl_entry; | |||
333 | * @ltree: the lock tree | 379 | * @ltree: the lock tree |
334 | * @alc_mutex: serializes "atomic LEB change" operations | 380 | * @alc_mutex: serializes "atomic LEB change" operations |
335 | * | 381 | * |
382 | * @fm_disabled: non-zero if fastmap is disabled (default) | ||
383 | * @fm: in-memory data structure of the currently used fastmap | ||
384 | * @fm_pool: in-memory data structure of the fastmap pool | ||
385 | * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL | ||
386 | * sub-system | ||
387 | * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf | ||
388 | * @fm_buf: vmalloc()'d buffer which holds the raw fastmap | ||
389 | * @fm_size: fastmap size in bytes | ||
390 | * @fm_sem: allows ubi_update_fastmap() to block EBA table changes | ||
391 | * @fm_work: fastmap work queue | ||
392 | * | ||
336 | * @used: RB-tree of used physical eraseblocks | 393 | * @used: RB-tree of used physical eraseblocks |
337 | * @erroneous: RB-tree of erroneous used physical eraseblocks | 394 | * @erroneous: RB-tree of erroneous used physical eraseblocks |
338 | * @free: RB-tree of free physical eraseblocks | 395 | * @free: RB-tree of free physical eraseblocks |
396 | * @free_count: Contains the number of elements in @free | ||
339 | * @scrub: RB-tree of physical eraseblocks which need scrubbing | 397 | * @scrub: RB-tree of physical eraseblocks which need scrubbing |
340 | * @pq: protection queue (contain physical eraseblocks which are temporarily | 398 | * @pq: protection queue (contain physical eraseblocks which are temporarily |
341 | * protected from the wear-leveling worker) | 399 | * protected from the wear-leveling worker) |
@@ -426,10 +484,22 @@ struct ubi_device { | |||
426 | struct rb_root ltree; | 484 | struct rb_root ltree; |
427 | struct mutex alc_mutex; | 485 | struct mutex alc_mutex; |
428 | 486 | ||
487 | /* Fastmap stuff */ | ||
488 | int fm_disabled; | ||
489 | struct ubi_fastmap_layout *fm; | ||
490 | struct ubi_fm_pool fm_pool; | ||
491 | struct ubi_fm_pool fm_wl_pool; | ||
492 | struct rw_semaphore fm_sem; | ||
493 | struct mutex fm_mutex; | ||
494 | void *fm_buf; | ||
495 | size_t fm_size; | ||
496 | struct work_struct fm_work; | ||
497 | |||
429 | /* Wear-leveling sub-system's stuff */ | 498 | /* Wear-leveling sub-system's stuff */ |
430 | struct rb_root used; | 499 | struct rb_root used; |
431 | struct rb_root erroneous; | 500 | struct rb_root erroneous; |
432 | struct rb_root free; | 501 | struct rb_root free; |
502 | int free_count; | ||
433 | struct rb_root scrub; | 503 | struct rb_root scrub; |
434 | struct list_head pq[UBI_PROT_QUEUE_LEN]; | 504 | struct list_head pq[UBI_PROT_QUEUE_LEN]; |
435 | int pq_head; | 505 | int pq_head; |
@@ -596,6 +666,32 @@ struct ubi_attach_info { | |||
596 | struct kmem_cache *aeb_slab_cache; | 666 | struct kmem_cache *aeb_slab_cache; |
597 | }; | 667 | }; |
598 | 668 | ||
669 | /** | ||
670 | * struct ubi_work - UBI work description data structure. | ||
671 | * @list: a link in the list of pending works | ||
672 | * @func: worker function | ||
673 | * @e: physical eraseblock to erase | ||
674 | * @vol_id: the volume ID on which this erasure is being performed | ||
675 | * @lnum: the logical eraseblock number | ||
676 | * @torture: if the physical eraseblock has to be tortured | ||
677 | * @anchor: produce an anchor PEB to be used by fastmap | ||
678 | * | ||
679 | * The @func pointer points to the worker function. If the @cancel argument is | ||
680 | * not zero, the worker has to free the resources and exit immediately. The | ||
681 | * worker has to return zero in case of success and a negative error code in | ||
682 | * case of failure. | ||
683 | */ | ||
684 | struct ubi_work { | ||
685 | struct list_head list; | ||
686 | int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); | ||
687 | /* The below fields are only relevant to erasure works */ | ||
688 | struct ubi_wl_entry *e; | ||
689 | int vol_id; | ||
690 | int lnum; | ||
691 | int torture; | ||
692 | int anchor; | ||
693 | }; | ||
694 | |||
599 | #include "debug.h" | 695 | #include "debug.h" |
600 | 696 | ||
601 | extern struct kmem_cache *ubi_wl_entry_slab; | 697 | extern struct kmem_cache *ubi_wl_entry_slab; |
@@ -606,7 +702,7 @@ extern struct class *ubi_class; | |||
606 | extern struct mutex ubi_devices_mutex; | 702 | extern struct mutex ubi_devices_mutex; |
607 | extern struct blocking_notifier_head ubi_notifiers; | 703 | extern struct blocking_notifier_head ubi_notifiers; |
608 | 704 | ||
609 | /* scan.c */ | 705 | /* attach.c */ |
610 | int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, | 706 | int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, |
611 | int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); | 707 | int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); |
612 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | 708 | struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, |
@@ -614,7 +710,7 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, | |||
614 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); | 710 | void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); |
615 | struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, | 711 | struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, |
616 | struct ubi_attach_info *ai); | 712 | struct ubi_attach_info *ai); |
617 | int ubi_attach(struct ubi_device *ubi); | 713 | int ubi_attach(struct ubi_device *ubi, int force_scan); |
618 | void ubi_destroy_ai(struct ubi_attach_info *ai); | 714 | void ubi_destroy_ai(struct ubi_attach_info *ai); |
619 | 715 | ||
620 | /* vtbl.c */ | 716 | /* vtbl.c */ |
@@ -664,6 +760,9 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | |||
664 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 760 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
665 | struct ubi_vid_hdr *vid_hdr); | 761 | struct ubi_vid_hdr *vid_hdr); |
666 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); | 762 | int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
763 | unsigned long long ubi_next_sqnum(struct ubi_device *ubi); | ||
764 | int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap, | ||
765 | struct ubi_attach_info *ai_scan); | ||
667 | 766 | ||
668 | /* wl.c */ | 767 | /* wl.c */ |
669 | int ubi_wl_get_peb(struct ubi_device *ubi); | 768 | int ubi_wl_get_peb(struct ubi_device *ubi); |
@@ -674,6 +773,12 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); | |||
674 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); | 773 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); |
675 | void ubi_wl_close(struct ubi_device *ubi); | 774 | void ubi_wl_close(struct ubi_device *ubi); |
676 | int ubi_thread(void *u); | 775 | int ubi_thread(void *u); |
776 | struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor); | ||
777 | int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e, | ||
778 | int lnum, int torture); | ||
779 | int ubi_is_erase_work(struct ubi_work *wrk); | ||
780 | void ubi_refill_pools(struct ubi_device *ubi); | ||
781 | int ubi_ensure_anchor_pebs(struct ubi_device *ubi); | ||
677 | 782 | ||
678 | /* io.c */ | 783 | /* io.c */ |
679 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | 784 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, |
@@ -711,6 +816,15 @@ void ubi_free_internal_volumes(struct ubi_device *ubi); | |||
711 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); | 816 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); |
712 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, | 817 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, |
713 | struct ubi_volume_info *vi); | 818 | struct ubi_volume_info *vi); |
819 | /* scan.c */ | ||
820 | int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, | ||
821 | int pnum, const struct ubi_vid_hdr *vid_hdr); | ||
822 | |||
823 | /* fastmap.c */ | ||
824 | size_t ubi_calc_fm_size(struct ubi_device *ubi); | ||
825 | int ubi_update_fastmap(struct ubi_device *ubi); | ||
826 | int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, | ||
827 | int fm_anchor); | ||
714 | 828 | ||
715 | /* | 829 | /* |
716 | * ubi_rb_for_each_entry - walk an RB-tree. | 830 | * ubi_rb_for_each_entry - walk an RB-tree. |
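Taken together, the new ubi_attach(ubi, force_scan) signature and the fastmap.c prototypes spell out the attach-time decision: try the fastmap unless the caller forces a scan or fastmap is disabled, and fall back to scanning every PEB if that fails. A simplified sketch of that control flow; the real logic lives in attach.c, and scan_all_pebs() is a hypothetical stand-in for the full-scan path:

static int attach_flow(struct ubi_device *ubi, struct ubi_attach_info *ai,
                       int force_scan, int fm_anchor)
{
        int err = -1;

        if (!force_scan && !ubi->fm_disabled && fm_anchor >= 0)
                err = ubi_scan_fastmap(ubi, ai, fm_anchor);  /* fast path */
        if (err)
                err = scan_all_pebs(ubi, ai);  /* hypothetical full scan */
        return err;
}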
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 032fc57f1090..da7b44998b40 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -135,36 +135,48 @@ | |||
135 | */ | 135 | */ |
136 | #define WL_MAX_FAILURES 32 | 136 | #define WL_MAX_FAILURES 32 |
137 | 137 | ||
138 | /** | ||
139 | * struct ubi_work - UBI work description data structure. | ||
140 | * @list: a link in the list of pending works | ||
141 | * @func: worker function | ||
142 | * @e: physical eraseblock to erase | ||
143 | * @vol_id: the volume ID on which this erasure is being performed | ||
144 | * @lnum: the logical eraseblock number | ||
145 | * @torture: if the physical eraseblock has to be tortured | ||
146 | * | ||
147 | * The @func pointer points to the worker function. If the @cancel argument is | ||
148 | * not zero, the worker has to free the resources and exit immediately. The | ||
149 | * worker has to return zero in case of success and a negative error code in | ||
150 | * case of failure. | ||
151 | */ | ||
152 | struct ubi_work { | ||
153 | struct list_head list; | ||
154 | int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel); | ||
155 | /* The below fields are only relevant to erasure works */ | ||
156 | struct ubi_wl_entry *e; | ||
157 | int vol_id; | ||
158 | int lnum; | ||
159 | int torture; | ||
160 | }; | ||
161 | |||
162 | static int self_check_ec(struct ubi_device *ubi, int pnum, int ec); | 138 | static int self_check_ec(struct ubi_device *ubi, int pnum, int ec); |
163 | static int self_check_in_wl_tree(const struct ubi_device *ubi, | 139 | static int self_check_in_wl_tree(const struct ubi_device *ubi, |
164 | struct ubi_wl_entry *e, struct rb_root *root); | 140 | struct ubi_wl_entry *e, struct rb_root *root); |
165 | static int self_check_in_pq(const struct ubi_device *ubi, | 141 | static int self_check_in_pq(const struct ubi_device *ubi, |
166 | struct ubi_wl_entry *e); | 142 | struct ubi_wl_entry *e); |
167 | 143 | ||
144 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
145 | /** | ||
146 | * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue | ||
147 | * @wrk: the work description object | ||
148 | */ | ||
149 | static void update_fastmap_work_fn(struct work_struct *wrk) | ||
150 | { | ||
151 | struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work); | ||
152 | ubi_update_fastmap(ubi); | ||
153 | } | ||
154 | |||
155 | /** | ||
156 | * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap. | ||
157 | * @ubi: UBI device description object | ||
158 | * @pnum: the PEB to be checked | ||
159 | */ | ||
160 | static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) | ||
161 | { | ||
162 | int i; | ||
163 | |||
164 | if (!ubi->fm) | ||
165 | return 0; | ||
166 | |||
167 | for (i = 0; i < ubi->fm->used_blocks; i++) | ||
168 | if (ubi->fm->e[i]->pnum == pnum) | ||
169 | return 1; | ||
170 | |||
171 | return 0; | ||
172 | } | ||
173 | #else | ||
174 | static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) | ||
175 | { | ||
176 | return 0; | ||
177 | } | ||
178 | #endif | ||
179 | |||
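The #else stub keeps the callers below free of #ifdef clutter: with CONFIG_MTD_UBI_FASTMAP disabled the check is a compile-time constant 0, so the asserts this patch adds to schedule_erase(), erase_worker() and ubi_wl_init() become trivially true and can be optimized away:

        ubi_assert(!ubi_is_fm_block(ubi, e->pnum));  /* no-op without fastmap */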
168 | /** | 180 | /** |
169 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. | 181 | * wl_tree_add - add a wear-leveling entry to a WL RB-tree. |
170 | * @e: the wear-leveling entry to add | 182 | * @e: the wear-leveling entry to add |
@@ -261,18 +273,16 @@ static int produce_free_peb(struct ubi_device *ubi) | |||
261 | { | 273 | { |
262 | int err; | 274 | int err; |
263 | 275 | ||
264 | spin_lock(&ubi->wl_lock); | ||
265 | while (!ubi->free.rb_node) { | 276 | while (!ubi->free.rb_node) { |
266 | spin_unlock(&ubi->wl_lock); | 277 | spin_unlock(&ubi->wl_lock); |
267 | 278 | ||
268 | dbg_wl("do one work synchronously"); | 279 | dbg_wl("do one work synchronously"); |
269 | err = do_work(ubi); | 280 | err = do_work(ubi); |
270 | if (err) | ||
271 | return err; | ||
272 | 281 | ||
273 | spin_lock(&ubi->wl_lock); | 282 | spin_lock(&ubi->wl_lock); |
283 | if (err) | ||
284 | return err; | ||
274 | } | 285 | } |
275 | spin_unlock(&ubi->wl_lock); | ||
276 | 286 | ||
277 | return 0; | 287 | return 0; |
278 | } | 288 | } |
@@ -339,16 +349,18 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) | |||
339 | 349 | ||
340 | /** | 350 | /** |
341 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. | 351 | * find_wl_entry - find wear-leveling entry closest to certain erase counter. |
352 | * @ubi: UBI device description object | ||
342 | * @root: the RB-tree where to look for | 353 | * @root: the RB-tree where to look for |
343 | * @diff: maximum possible difference from the smallest erase counter | 354 | * @diff: maximum possible difference from the smallest erase counter |
344 | * | 355 | * |
345 | * This function looks for a wear leveling entry with erase counter closest to | 356 | * This function looks for a wear leveling entry with erase counter closest to |
346 | * min + @diff, where min is the smallest erase counter. | 357 | * min + @diff, where min is the smallest erase counter. |
347 | */ | 358 | */ |
348 | static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) | 359 | static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, |
360 | struct rb_root *root, int diff) | ||
349 | { | 361 | { |
350 | struct rb_node *p; | 362 | struct rb_node *p; |
351 | struct ubi_wl_entry *e; | 363 | struct ubi_wl_entry *e, *prev_e = NULL; |
352 | int max; | 364 | int max; |
353 | 365 | ||
354 | e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); | 366 | e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); |
@@ -363,35 +375,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) | |||
363 | p = p->rb_left; | 375 | p = p->rb_left; |
364 | else { | 376 | else { |
365 | p = p->rb_right; | 377 | p = p->rb_right; |
378 | prev_e = e; | ||
366 | e = e1; | 379 | e = e1; |
367 | } | 380 | } |
368 | } | 381 | } |
369 | 382 | ||
383 | /* If no fastmap has been written and this WL entry can be used | ||
384 | * as anchor PEB, hold it back and return the second best WL entry | ||
385 | * such that fastmap can use the anchor PEB later. */ | ||
386 | if (prev_e && !ubi->fm_disabled && | ||
387 | !ubi->fm && e->pnum < UBI_FM_MAX_START) | ||
388 | return prev_e; | ||
389 | |||
370 | return e; | 390 | return e; |
371 | } | 391 | } |
372 | 392 | ||
373 | /** | 393 | /** |
374 | * ubi_wl_get_peb - get a physical eraseblock. | 394 | * find_mean_wl_entry - find wear-leveling entry with medium erase counter. |
395 | * @ubi: UBI device description object | ||
396 | * @root: the RB-tree where to look for | ||
397 | * | ||
398 | * This function looks for a wear leveling entry with a medium erase | ||
399 | * counter, but below the lowest erase counter plus | ||
400 | * %WL_FREE_MAX_DIFF/2. | ||
401 | */ | ||
402 | static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, | ||
403 | struct rb_root *root) | ||
404 | { | ||
405 | struct ubi_wl_entry *e, *first, *last; | ||
406 | |||
407 | first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); | ||
408 | last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); | ||
409 | |||
410 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) { | ||
411 | e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); | ||
412 | |||
413 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
414 | /* If no fastmap has been written and this WL entry can be used | ||
415 | * as anchor PEB, hold it back and return the second best | ||
416 | * WL entry such that fastmap can use the anchor PEB later. */ | ||
417 | if (e && !ubi->fm_disabled && !ubi->fm && | ||
418 | e->pnum < UBI_FM_MAX_START) | ||
419 | e = rb_entry(rb_next(root->rb_node), | ||
420 | struct ubi_wl_entry, u.rb); | ||
421 | #endif | ||
422 | } else | ||
423 | e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); | ||
424 | |||
425 | return e; | ||
426 | } | ||
427 | |||
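Both helpers apply the same hold-back rule: while fastmap is enabled but none has been written yet, a candidate whose pnum lies below UBI_FM_MAX_START is skipped in favour of the runner-up, so low-numbered PEBs stay free for the future anchor. A self-contained toy illustration of the rule, with invented numbers:

#include <stdio.h>

#define UBI_FM_MAX_START 64  /* anchor PEBs must lie below this pnum */

int main(void)
{
        int best = 3, runner_up = 70;      /* candidate PEB numbers */
        int fm_disabled = 0, have_fm = 0;  /* fastmap on, none written yet */
        int pick = best;

        if (!fm_disabled && !have_fm && best < UBI_FM_MAX_START)
                pick = runner_up;          /* hold PEB 3 back for the anchor */

        printf("picked PEB %d\n", pick);   /* prints: picked PEB 70 */
        return 0;
}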
428 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
429 | /** | ||
430 | * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB. | ||
431 | * @root: the RB-tree where to look for | ||
432 | */ | ||
433 | static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) | ||
434 | { | ||
435 | struct rb_node *p; | ||
436 | struct ubi_wl_entry *e, *victim = NULL; | ||
437 | int max_ec = UBI_MAX_ERASECOUNTER; | ||
438 | |||
439 | ubi_rb_for_each_entry(p, e, root, u.rb) { | ||
440 | if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) { | ||
441 | victim = e; | ||
442 | max_ec = e->ec; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | return victim; | ||
447 | } | ||
448 | |||
449 | static int anchor_pebs_avalible(struct rb_root *root) | ||
450 | { | ||
451 | struct rb_node *p; | ||
452 | struct ubi_wl_entry *e; | ||
453 | |||
454 | ubi_rb_for_each_entry(p, e, root, u.rb) | ||
455 | if (e->pnum < UBI_FM_MAX_START) | ||
456 | return 1; | ||
457 | |||
458 | return 0; | ||
459 | } | ||
460 | |||
461 | /** | ||
462 | * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap. | ||
463 | * @ubi: UBI device description object | ||
464 | * @anchor: This PEB will be used as anchor PEB by fastmap | ||
465 | * | ||
466 | * The function returns a free physical eraseblock (one with a pnum below | ||
467 | * UBI_FM_MAX_START if @anchor is set) and removes it from the WL sub-system. | ||
468 | * Must be called with wl_lock held! | ||
469 | */ | ||
470 | struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) | ||
471 | { | ||
472 | struct ubi_wl_entry *e = NULL; | ||
473 | |||
474 | if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) | ||
475 | goto out; | ||
476 | |||
477 | if (anchor) | ||
478 | e = find_anchor_wl_entry(&ubi->free); | ||
479 | else | ||
480 | e = find_mean_wl_entry(ubi, &ubi->free); | ||
481 | |||
482 | if (!e) | ||
483 | goto out; | ||
484 | |||
485 | self_check_in_wl_tree(ubi, e, &ubi->free); | ||
486 | |||
487 | /* remove it from the free list, | ||
488 | * the WL sub-system no longer knows this eraseblock */ | ||
489 | rb_erase(&e->u.rb, &ubi->free); | ||
490 | ubi->free_count--; | ||
491 | out: | ||
492 | return e; | ||
493 | } | ||
494 | #endif | ||
495 | |||
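The caller side lives in fastmap.c; the two points visible here are that the function never dips into the bad-PEB reserve (free_count - beb_rsvd_pebs must stay positive) and that it expects wl_lock to be held. A fragment sketching the assumed usage in a fastmap write path:

        struct ubi_wl_entry *e;

        spin_lock(&ubi->wl_lock);
        e = ubi_wl_get_fm_peb(ubi, 1);  /* 1: request an anchor PEB */
        spin_unlock(&ubi->wl_lock);
        if (!e)
                err = -ENOSPC;          /* no free PEB below UBI_FM_MAX_START */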
496 | /** | ||
497 | * __wl_get_peb - get a physical eraseblock. | ||
375 | * @ubi: UBI device description object | 498 | * @ubi: UBI device description object |
376 | * | 499 | * |
377 | * This function returns a physical eraseblock in case of success and a | 500 | * This function returns a physical eraseblock in case of success and a |
378 | * negative error code in case of failure. Might sleep. | 501 | * negative error code in case of failure. Might sleep. |
379 | */ | 502 | */ |
380 | int ubi_wl_get_peb(struct ubi_device *ubi) | 503 | static int __wl_get_peb(struct ubi_device *ubi) |
381 | { | 504 | { |
382 | int err; | 505 | int err; |
383 | struct ubi_wl_entry *e, *first, *last; | 506 | struct ubi_wl_entry *e; |
384 | 507 | ||
385 | retry: | 508 | retry: |
386 | spin_lock(&ubi->wl_lock); | ||
387 | if (!ubi->free.rb_node) { | 509 | if (!ubi->free.rb_node) { |
388 | if (ubi->works_count == 0) { | 510 | if (ubi->works_count == 0) { |
389 | ubi_assert(list_empty(&ubi->works)); | ||
390 | ubi_err("no free eraseblocks"); | 511 | ubi_err("no free eraseblocks"); |
391 | spin_unlock(&ubi->wl_lock); | 512 | ubi_assert(list_empty(&ubi->works)); |
392 | return -ENOSPC; | 513 | return -ENOSPC; |
393 | } | 514 | } |
394 | spin_unlock(&ubi->wl_lock); | ||
395 | 515 | ||
396 | err = produce_free_peb(ubi); | 516 | err = produce_free_peb(ubi); |
397 | if (err < 0) | 517 | if (err < 0) |
@@ -399,13 +519,11 @@ retry: | |||
399 | goto retry; | 519 | goto retry; |
400 | } | 520 | } |
401 | 521 | ||
402 | first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); | 522 | e = find_mean_wl_entry(ubi, &ubi->free); |
403 | last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); | 523 | if (!e) { |
404 | 524 | ubi_err("no free eraseblocks"); | |
405 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) | 525 | return -ENOSPC; |
406 | e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); | 526 | } |
407 | else | ||
408 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); | ||
409 | 527 | ||
410 | self_check_in_wl_tree(ubi, e, &ubi->free); | 528 | self_check_in_wl_tree(ubi, e, &ubi->free); |
411 | 529 | ||
@@ -414,10 +532,14 @@ retry: | |||
414 | * be protected from being moved for some time. | 532 | * be protected from being moved for some time. |
415 | */ | 533 | */ |
416 | rb_erase(&e->u.rb, &ubi->free); | 534 | rb_erase(&e->u.rb, &ubi->free); |
535 | ubi->free_count--; | ||
417 | dbg_wl("PEB %d EC %d", e->pnum, e->ec); | 536 | dbg_wl("PEB %d EC %d", e->pnum, e->ec); |
537 | #ifndef CONFIG_MTD_UBI_FASTMAP | ||
538 | /* We have to enqueue e only if fastmap is disabled, | ||
539 | * is fastmap enabled prot_queue_add() will be called by | ||
540 | * ubi_wl_get_peb() after removing e from the pool. */ | ||
418 | prot_queue_add(ubi, e); | 541 | prot_queue_add(ubi, e); |
419 | spin_unlock(&ubi->wl_lock); | 542 | #endif |
420 | |||
421 | err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, | 543 | err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, |
422 | ubi->peb_size - ubi->vid_hdr_aloffset); | 544 | ubi->peb_size - ubi->vid_hdr_aloffset); |
423 | if (err) { | 545 | if (err) { |
@@ -428,6 +550,150 @@ retry: | |||
428 | return e->pnum; | 550 | return e->pnum; |
429 | } | 551 | } |
430 | 552 | ||
553 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
554 | /** | ||
555 | * return_unused_pool_pebs - returns unused PEBs to the free tree. | ||
556 | * @ubi: UBI device description object | ||
557 | * @pool: fastmap pool description object | ||
558 | */ | ||
559 | static void return_unused_pool_pebs(struct ubi_device *ubi, | ||
560 | struct ubi_fm_pool *pool) | ||
561 | { | ||
562 | int i; | ||
563 | struct ubi_wl_entry *e; | ||
564 | |||
565 | for (i = pool->used; i < pool->size; i++) { | ||
566 | e = ubi->lookuptbl[pool->pebs[i]]; | ||
567 | wl_tree_add(e, &ubi->free); | ||
568 | ubi->free_count++; | ||
569 | } | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * refill_wl_pool - refills the fastmap pool used by the | ||
574 | * WL sub-system. | ||
575 | * @ubi: UBI device description object | ||
576 | */ | ||
577 | static void refill_wl_pool(struct ubi_device *ubi) | ||
578 | { | ||
579 | struct ubi_wl_entry *e; | ||
580 | struct ubi_fm_pool *pool = &ubi->fm_wl_pool; | ||
581 | |||
582 | return_unused_pool_pebs(ubi, pool); | ||
583 | |||
584 | for (pool->size = 0; pool->size < pool->max_size; pool->size++) { | ||
585 | if (!ubi->free.rb_node || | ||
586 | (ubi->free_count - ubi->beb_rsvd_pebs < 5)) | ||
587 | break; | ||
588 | |||
589 | e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); | ||
590 | self_check_in_wl_tree(ubi, e, &ubi->free); | ||
591 | rb_erase(&e->u.rb, &ubi->free); | ||
592 | ubi->free_count--; | ||
593 | |||
594 | pool->pebs[pool->size] = e->pnum; | ||
595 | } | ||
596 | pool->used = 0; | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb(). | ||
601 | * @ubi: UBI device description object | ||
602 | */ | ||
603 | static void refill_wl_user_pool(struct ubi_device *ubi) | ||
604 | { | ||
605 | struct ubi_fm_pool *pool = &ubi->fm_pool; | ||
606 | |||
607 | return_unused_pool_pebs(ubi, pool); | ||
608 | |||
609 | for (pool->size = 0; pool->size < pool->max_size; pool->size++) { | ||
610 | if (!ubi->free.rb_node || | ||
611 | (ubi->free_count - ubi->beb_rsvd_pebs < 1)) | ||
612 | break; | ||
613 | |||
614 | pool->pebs[pool->size] = __wl_get_peb(ubi); | ||
615 | if (pool->pebs[pool->size] < 0) | ||
616 | break; | ||
617 | } | ||
618 | pool->used = 0; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * ubi_refill_pools - refills all fastmap PEB pools. | ||
623 | * @ubi: UBI device description object | ||
624 | */ | ||
625 | void ubi_refill_pools(struct ubi_device *ubi) | ||
626 | { | ||
627 | spin_lock(&ubi->wl_lock); | ||
628 | refill_wl_pool(ubi); | ||
629 | refill_wl_user_pool(ubi); | ||
630 | spin_unlock(&ubi->wl_lock); | ||
631 | } | ||
632 | |||
633 | /* ubi_wl_get_peb - works exactly like __wl_get_peb() but keeps track of | ||
634 | * the fastmap pool. | ||
635 | */ | ||
636 | int ubi_wl_get_peb(struct ubi_device *ubi) | ||
637 | { | ||
638 | int ret; | ||
639 | struct ubi_fm_pool *pool = &ubi->fm_pool; | ||
640 | struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool; | ||
641 | |||
642 | if (!pool->size || !wl_pool->size || pool->used == pool->size || | ||
643 | wl_pool->used == wl_pool->size) | ||
644 | ubi_update_fastmap(ubi); | ||
645 | |||
646 | /* not a single free PEB is available */ | ||
647 | if (!pool->size) | ||
648 | ret = -ENOSPC; | ||
649 | else { | ||
650 | spin_lock(&ubi->wl_lock); | ||
651 | ret = pool->pebs[pool->used++]; | ||
652 | prot_queue_add(ubi, ubi->lookuptbl[ret]); | ||
653 | spin_unlock(&ubi->wl_lock); | ||
654 | } | ||
655 | |||
656 | return ret; | ||
657 | } | ||
658 | |||
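The refill helpers and this ubi_wl_get_peb() variant form a simple lifecycle: pools are filled under wl_lock (keeping a headroom of 5 respectively 1 PEBs above the bad-PEB reserve), drained through the @used index, and a drained pool forces ubi_update_fastmap(), which in turn refills via ubi_refill_pools(). Compressed into an illustrative fragment:

        if (pool->used == pool->size)    /* pool drained ... */
                ubi_update_fastmap(ubi); /* ... write fastmap and refill */

        spin_lock(&ubi->wl_lock);
        pnum = pool->pebs[pool->used++]; /* consume the next pooled PEB */
        prot_queue_add(ubi, ubi->lookuptbl[pnum]);
        spin_unlock(&ubi->wl_lock);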
659 | /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system. | ||
660 | * | ||
661 | * @ubi: UBI device description object | ||
662 | */ | ||
663 | static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) | ||
664 | { | ||
665 | struct ubi_fm_pool *pool = &ubi->fm_wl_pool; | ||
666 | int pnum; | ||
667 | |||
668 | if (pool->used == pool->size || !pool->size) { | ||
669 | /* We cannot update the fastmap here because this | ||
670 | * function is called in atomic context. | ||
671 | * Let's fail here and refill/update it as soon as possible. */ | ||
672 | schedule_work(&ubi->fm_work); | ||
673 | return NULL; | ||
674 | } else { | ||
675 | pnum = pool->pebs[pool->used++]; | ||
676 | return ubi->lookuptbl[pnum]; | ||
677 | } | ||
678 | } | ||
679 | #else | ||
680 | static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) | ||
681 | { | ||
682 | return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); | ||
683 | } | ||
684 | |||
685 | int ubi_wl_get_peb(struct ubi_device *ubi) | ||
686 | { | ||
687 | int peb; | ||
688 | |||
689 | spin_lock(&ubi->wl_lock); | ||
690 | peb = __wl_get_peb(ubi); | ||
691 | spin_unlock(&ubi->wl_lock); | ||
692 | |||
693 | return peb; | ||
694 | } | ||
695 | #endif | ||
696 | |||
431 | /** | 697 | /** |
432 | * prot_queue_del - remove a physical eraseblock from the protection queue. | 698 | * prot_queue_del - remove a physical eraseblock from the protection queue. |
433 | * @ubi: UBI device description object | 699 | * @ubi: UBI device description object |
@@ -558,14 +824,14 @@ repeat: | |||
558 | } | 824 | } |
559 | 825 | ||
560 | /** | 826 | /** |
561 | * schedule_ubi_work - schedule a work. | 827 | * __schedule_ubi_work - schedule a work. |
562 | * @ubi: UBI device description object | 828 | * @ubi: UBI device description object |
563 | * @wrk: the work to schedule | 829 | * @wrk: the work to schedule |
564 | * | 830 | * |
565 | * This function adds a work defined by @wrk to the tail of the pending works | 831 | * This function adds a work defined by @wrk to the tail of the pending works |
566 | * list. | 832 | * list. Can only be used if ubi->work_sem is already held in read mode! |
567 | */ | 833 | */ |
568 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | 834 | static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) |
569 | { | 835 | { |
570 | spin_lock(&ubi->wl_lock); | 836 | spin_lock(&ubi->wl_lock); |
571 | list_add_tail(&wrk->list, &ubi->works); | 837 | list_add_tail(&wrk->list, &ubi->works); |
@@ -576,9 +842,35 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | |||
576 | spin_unlock(&ubi->wl_lock); | 842 | spin_unlock(&ubi->wl_lock); |
577 | } | 843 | } |
578 | 844 | ||
845 | /** | ||
846 | * schedule_ubi_work - schedule a work. | ||
847 | * @ubi: UBI device description object | ||
848 | * @wrk: the work to schedule | ||
849 | * | ||
850 | * This function adds a work defined by @wrk to the tail of the pending works | ||
851 | * list. | ||
852 | */ | ||
853 | static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) | ||
854 | { | ||
855 | down_read(&ubi->work_sem); | ||
856 | __schedule_ubi_work(ubi, wrk); | ||
857 | up_read(&ubi->work_sem); | ||
858 | } | ||
859 | |||
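The split exists because a worker already runs with @work_sem read-held (do_work() takes it before invoking @func), and an rw_semaphore must not be re-acquired for reading while a writer is queued behind it, or the thread deadlocks against itself. ensure_wear_leveling(ubi, 1), called from erase_worker() further down, is the concrete user; in fragment form, assuming @wrk was just allocated inside a worker:

        /* work_sem is already read-held here (we run under do_work()) */
        wrk->anchor = 0;
        wrk->func = &wear_leveling_worker;
        __schedule_ubi_work(ubi, wrk);  /* NOT schedule_ubi_work() */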
579 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | 860 | static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, |
580 | int cancel); | 861 | int cancel); |
581 | 862 | ||
863 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
864 | /** | ||
865 | * ubi_is_erase_work - checks whether a work is erase work. | ||
866 | * @wrk: The work object to be checked | ||
867 | */ | ||
868 | int ubi_is_erase_work(struct ubi_work *wrk) | ||
869 | { | ||
870 | return wrk->func == erase_worker; | ||
871 | } | ||
872 | #endif | ||
873 | |||
582 | /** | 874 | /** |
583 | * schedule_erase - schedule an erase work. | 875 | * schedule_erase - schedule an erase work. |
584 | * @ubi: UBI device description object | 876 | * @ubi: UBI device description object |
@@ -595,6 +887,9 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
595 | { | 887 | { |
596 | struct ubi_work *wl_wrk; | 888 | struct ubi_work *wl_wrk; |
597 | 889 | ||
890 | ubi_assert(e); | ||
891 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
892 | |||
598 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", | 893 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", |
599 | e->pnum, e->ec, torture); | 894 | e->pnum, e->ec, torture); |
600 | 895 | ||
@@ -613,6 +908,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
613 | } | 908 | } |
614 | 909 | ||
615 | /** | 910 | /** |
911 | * do_sync_erase - run the erase worker synchronously. | ||
912 | * @ubi: UBI device description object | ||
913 | * @e: the WL entry of the physical eraseblock to erase | ||
914 | * @vol_id: the volume ID that last used this PEB | ||
915 | * @lnum: the last used logical eraseblock number for the PEB | ||
916 | * @torture: if the physical eraseblock has to be tortured | ||
917 | * | ||
918 | */ | ||
919 | static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | ||
920 | int vol_id, int lnum, int torture) | ||
921 | { | ||
922 | struct ubi_work *wl_wrk; | ||
923 | |||
924 | dbg_wl("sync erase of PEB %i", e->pnum); | ||
925 | |||
926 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); | ||
927 | if (!wl_wrk) | ||
928 | return -ENOMEM; | ||
929 | |||
930 | wl_wrk->e = e; | ||
931 | wl_wrk->vol_id = vol_id; | ||
932 | wl_wrk->lnum = lnum; | ||
933 | wl_wrk->torture = torture; | ||
934 | |||
935 | return erase_worker(ubi, wl_wrk, 0); | ||
936 | } | ||
937 | |||
938 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
939 | /** | ||
940 | * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling | ||
941 | * sub-system. | ||
942 | * see: ubi_wl_put_peb() | ||
943 | * | ||
944 | * @ubi: UBI device description object | ||
945 | * @fm_e: physical eraseblock to return | ||
946 | * @lnum: the last used logical eraseblock number for the PEB | ||
947 | * @torture: if this physical eraseblock has to be tortured | ||
948 | */ | ||
949 | int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, | ||
950 | int lnum, int torture) | ||
951 | { | ||
952 | struct ubi_wl_entry *e; | ||
953 | int vol_id, pnum = fm_e->pnum; | ||
954 | |||
955 | dbg_wl("PEB %d", pnum); | ||
956 | |||
957 | ubi_assert(pnum >= 0); | ||
958 | ubi_assert(pnum < ubi->peb_count); | ||
959 | |||
960 | spin_lock(&ubi->wl_lock); | ||
961 | e = ubi->lookuptbl[pnum]; | ||
962 | |||
963 | /* This can happen if we recovered from a fastmap for the very | ||
964 | * first time and are now writing a new one. In this case the WL system | ||
965 | * has never seen any PEB used by the original fastmap. | ||
966 | */ | ||
967 | if (!e) { | ||
968 | e = fm_e; | ||
969 | ubi_assert(e->ec >= 0); | ||
970 | ubi->lookuptbl[pnum] = e; | ||
971 | } else { | ||
972 | e->ec = fm_e->ec; | ||
973 | kfree(fm_e); | ||
974 | } | ||
975 | |||
976 | spin_unlock(&ubi->wl_lock); | ||
977 | |||
978 | vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; | ||
979 | return schedule_erase(ubi, e, vol_id, lnum, torture); | ||
980 | } | ||
981 | #endif | ||
982 | |||
983 | /** | ||
616 | * wear_leveling_worker - wear-leveling worker function. | 984 | * wear_leveling_worker - wear-leveling worker function. |
617 | * @ubi: UBI device description object | 985 | * @ubi: UBI device description object |
618 | * @wrk: the work object | 986 | * @wrk: the work object |
@@ -627,6 +995,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
627 | { | 995 | { |
628 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; | 996 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
629 | int vol_id = -1, uninitialized_var(lnum); | 997 | int vol_id = -1, uninitialized_var(lnum); |
998 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
999 | int anchor = wrk->anchor; | ||
1000 | #endif | ||
630 | struct ubi_wl_entry *e1, *e2; | 1001 | struct ubi_wl_entry *e1, *e2; |
631 | struct ubi_vid_hdr *vid_hdr; | 1002 | struct ubi_vid_hdr *vid_hdr; |
632 | 1003 | ||
@@ -660,14 +1031,35 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
660 | goto out_cancel; | 1031 | goto out_cancel; |
661 | } | 1032 | } |
662 | 1033 | ||
1034 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1035 | /* Check whether we need to produce an anchor PEB */ | ||
1036 | if (!anchor) | ||
1037 | anchor = !anchor_pebs_avalible(&ubi->free); | ||
1038 | |||
1039 | if (anchor) { | ||
1040 | e1 = find_anchor_wl_entry(&ubi->used); | ||
1041 | if (!e1) | ||
1042 | goto out_cancel; | ||
1043 | e2 = get_peb_for_wl(ubi); | ||
1044 | if (!e2) | ||
1045 | goto out_cancel; | ||
1046 | |||
1047 | self_check_in_wl_tree(ubi, e1, &ubi->used); | ||
1048 | rb_erase(&e1->u.rb, &ubi->used); | ||
1049 | dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); | ||
1050 | } else if (!ubi->scrub.rb_node) { | ||
1051 | #else | ||
663 | if (!ubi->scrub.rb_node) { | 1052 | if (!ubi->scrub.rb_node) { |
1053 | #endif | ||
664 | /* | 1054 | /* |
665 | * Now pick the least worn-out used physical eraseblock and a | 1055 | * Now pick the least worn-out used physical eraseblock and a |
666 | * highly worn-out free physical eraseblock. If the erase | 1056 | * highly worn-out free physical eraseblock. If the erase |
667 | * counters differ much enough, start wear-leveling. | 1057 | * counters differ much enough, start wear-leveling. |
668 | */ | 1058 | */ |
669 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); | 1059 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); |
670 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 1060 | e2 = get_peb_for_wl(ubi); |
1061 | if (!e2) | ||
1062 | goto out_cancel; | ||
671 | 1063 | ||
672 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { | 1064 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { |
673 | dbg_wl("no WL needed: min used EC %d, max free EC %d", | 1065 | dbg_wl("no WL needed: min used EC %d, max free EC %d", |
@@ -682,14 +1074,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
682 | /* Perform scrubbing */ | 1074 | /* Perform scrubbing */ |
683 | scrubbing = 1; | 1075 | scrubbing = 1; |
684 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); | 1076 | e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); |
685 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 1077 | e2 = get_peb_for_wl(ubi); |
1078 | if (!e2) | ||
1079 | goto out_cancel; | ||
1080 | |||
686 | self_check_in_wl_tree(ubi, e1, &ubi->scrub); | 1081 | self_check_in_wl_tree(ubi, e1, &ubi->scrub); |
687 | rb_erase(&e1->u.rb, &ubi->scrub); | 1082 | rb_erase(&e1->u.rb, &ubi->scrub); |
688 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); | 1083 | dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); |
689 | } | 1084 | } |
690 | 1085 | ||
691 | self_check_in_wl_tree(ubi, e2, &ubi->free); | ||
692 | rb_erase(&e2->u.rb, &ubi->free); | ||
693 | ubi->move_from = e1; | 1086 | ubi->move_from = e1; |
694 | ubi->move_to = e2; | 1087 | ubi->move_to = e2; |
695 | spin_unlock(&ubi->wl_lock); | 1088 | spin_unlock(&ubi->wl_lock); |
@@ -806,7 +1199,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
806 | ubi->move_to_put = ubi->wl_scheduled = 0; | 1199 | ubi->move_to_put = ubi->wl_scheduled = 0; |
807 | spin_unlock(&ubi->wl_lock); | 1200 | spin_unlock(&ubi->wl_lock); |
808 | 1201 | ||
809 | err = schedule_erase(ubi, e1, vol_id, lnum, 0); | 1202 | err = do_sync_erase(ubi, e1, vol_id, lnum, 0); |
810 | if (err) { | 1203 | if (err) { |
811 | kmem_cache_free(ubi_wl_entry_slab, e1); | 1204 | kmem_cache_free(ubi_wl_entry_slab, e1); |
812 | if (e2) | 1205 | if (e2) |
@@ -821,7 +1214,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
821 | */ | 1214 | */ |
822 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", | 1215 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", |
823 | e2->pnum, vol_id, lnum); | 1216 | e2->pnum, vol_id, lnum); |
824 | err = schedule_erase(ubi, e2, vol_id, lnum, 0); | 1217 | err = do_sync_erase(ubi, e2, vol_id, lnum, 0); |
825 | if (err) { | 1218 | if (err) { |
826 | kmem_cache_free(ubi_wl_entry_slab, e2); | 1219 | kmem_cache_free(ubi_wl_entry_slab, e2); |
827 | goto out_ro; | 1220 | goto out_ro; |
@@ -860,7 +1253,7 @@ out_not_moved: | |||
860 | spin_unlock(&ubi->wl_lock); | 1253 | spin_unlock(&ubi->wl_lock); |
861 | 1254 | ||
862 | ubi_free_vid_hdr(ubi, vid_hdr); | 1255 | ubi_free_vid_hdr(ubi, vid_hdr); |
863 | err = schedule_erase(ubi, e2, vol_id, lnum, torture); | 1256 | err = do_sync_erase(ubi, e2, vol_id, lnum, torture); |
864 | if (err) { | 1257 | if (err) { |
865 | kmem_cache_free(ubi_wl_entry_slab, e2); | 1258 | kmem_cache_free(ubi_wl_entry_slab, e2); |
866 | goto out_ro; | 1259 | goto out_ro; |
@@ -901,12 +1294,13 @@ out_cancel: | |||
901 | /** | 1294 | /** |
902 | * ensure_wear_leveling - schedule wear-leveling if it is needed. | 1295 | * ensure_wear_leveling - schedule wear-leveling if it is needed. |
903 | * @ubi: UBI device description object | 1296 | * @ubi: UBI device description object |
1297 | * @nested: set to non-zero if this function is called from the UBI worker | ||
904 | * | 1298 | * |
905 | * This function checks if it is time to start wear-leveling and schedules it | 1299 | * This function checks if it is time to start wear-leveling and schedules it |
906 | * if yes. This function returns zero in case of success and a negative error | 1300 | * if yes. This function returns zero in case of success and a negative error |
907 | * code in case of failure. | 1301 | * code in case of failure. |
908 | */ | 1302 | */ |
909 | static int ensure_wear_leveling(struct ubi_device *ubi) | 1303 | static int ensure_wear_leveling(struct ubi_device *ubi, int nested) |
910 | { | 1304 | { |
911 | int err = 0; | 1305 | int err = 0; |
912 | struct ubi_wl_entry *e1; | 1306 | struct ubi_wl_entry *e1; |
@@ -934,7 +1328,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
934 | * %UBI_WL_THRESHOLD. | 1328 | * %UBI_WL_THRESHOLD. |
935 | */ | 1329 | */ |
936 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); | 1330 | e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); |
937 | e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 1331 | e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); |
938 | 1332 | ||
939 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) | 1333 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) |
940 | goto out_unlock; | 1334 | goto out_unlock; |
@@ -951,8 +1345,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
951 | goto out_cancel; | 1345 | goto out_cancel; |
952 | } | 1346 | } |
953 | 1347 | ||
1348 | wrk->anchor = 0; | ||
954 | wrk->func = &wear_leveling_worker; | 1349 | wrk->func = &wear_leveling_worker; |
955 | schedule_ubi_work(ubi, wrk); | 1350 | if (nested) |
1351 | __schedule_ubi_work(ubi, wrk); | ||
1352 | else | ||
1353 | schedule_ubi_work(ubi, wrk); | ||
956 | return err; | 1354 | return err; |
957 | 1355 | ||
958 | out_cancel: | 1356 | out_cancel: |
@@ -963,6 +1361,38 @@ out_unlock: | |||
963 | return err; | 1361 | return err; |
964 | } | 1362 | } |
965 | 1363 | ||
1364 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1365 | /** | ||
1366 | * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB. | ||
1367 | * @ubi: UBI device description object | ||
1368 | */ | ||
1369 | int ubi_ensure_anchor_pebs(struct ubi_device *ubi) | ||
1370 | { | ||
1371 | struct ubi_work *wrk; | ||
1372 | |||
1373 | spin_lock(&ubi->wl_lock); | ||
1374 | if (ubi->wl_scheduled) { | ||
1375 | spin_unlock(&ubi->wl_lock); | ||
1376 | return 0; | ||
1377 | } | ||
1378 | ubi->wl_scheduled = 1; | ||
1379 | spin_unlock(&ubi->wl_lock); | ||
1380 | |||
1381 | wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); | ||
1382 | if (!wrk) { | ||
1383 | spin_lock(&ubi->wl_lock); | ||
1384 | ubi->wl_scheduled = 0; | ||
1385 | spin_unlock(&ubi->wl_lock); | ||
1386 | return -ENOMEM; | ||
1387 | } | ||
1388 | |||
1389 | wrk->anchor = 1; | ||
1390 | wrk->func = &wear_leveling_worker; | ||
1391 | schedule_ubi_work(ubi, wrk); | ||
1392 | return 0; | ||
1393 | } | ||
1394 | #endif | ||
1395 | |||
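This is the producer half of the anchor logic: a work queued with wrk->anchor = 1 makes wear_leveling_worker() above pick a low-numbered used PEB via find_anchor_wl_entry(&ubi->used) and migrate its contents away, so that a later ubi_wl_get_fm_peb(ubi, 1) finds a free PEB below UBI_FM_MAX_START. The assumed consumer sequence in the fastmap write path, with error handling omitted:

        err = ubi_ensure_anchor_pebs(ubi);  /* schedule anchor-producing WL work */
        if (err)
                return err;

        spin_lock(&ubi->wl_lock);
        e = ubi_wl_get_fm_peb(ubi, 1);      /* pick up the freed anchor PEB */
        spin_unlock(&ubi->wl_lock);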
966 | /** | 1396 | /** |
967 | * erase_worker - physical eraseblock erase worker function. | 1397 | * erase_worker - physical eraseblock erase worker function. |
968 | * @ubi: UBI device description object | 1398 | * @ubi: UBI device description object |
@@ -993,6 +1423,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
993 | dbg_wl("erase PEB %d EC %d LEB %d:%d", | 1423 | dbg_wl("erase PEB %d EC %d LEB %d:%d", |
994 | pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); | 1424 | pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); |
995 | 1425 | ||
1426 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
1427 | |||
996 | err = sync_erase(ubi, e, wl_wrk->torture); | 1428 | err = sync_erase(ubi, e, wl_wrk->torture); |
997 | if (!err) { | 1429 | if (!err) { |
998 | /* Fine, we've erased it successfully */ | 1430 | /* Fine, we've erased it successfully */ |
@@ -1000,6 +1432,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
1000 | 1432 | ||
1001 | spin_lock(&ubi->wl_lock); | 1433 | spin_lock(&ubi->wl_lock); |
1002 | wl_tree_add(e, &ubi->free); | 1434 | wl_tree_add(e, &ubi->free); |
1435 | ubi->free_count++; | ||
1003 | spin_unlock(&ubi->wl_lock); | 1436 | spin_unlock(&ubi->wl_lock); |
1004 | 1437 | ||
1005 | /* | 1438 | /* |
@@ -1009,7 +1442,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
1009 | serve_prot_queue(ubi); | 1442 | serve_prot_queue(ubi); |
1010 | 1443 | ||
1011 | /* And take care about wear-leveling */ | 1444 | /* And take care about wear-leveling */ |
1012 | err = ensure_wear_leveling(ubi); | 1445 | err = ensure_wear_leveling(ubi, 1); |
1013 | return err; | 1446 | return err; |
1014 | } | 1447 | } |
1015 | 1448 | ||
@@ -1247,7 +1680,7 @@ retry: | |||
1247 | * Technically scrubbing is the same as wear-leveling, so it is done | 1680 | * Technically scrubbing is the same as wear-leveling, so it is done |
1248 | * by the WL worker. | 1681 | * by the WL worker. |
1249 | */ | 1682 | */ |
1250 | return ensure_wear_leveling(ubi); | 1683 | return ensure_wear_leveling(ubi, 0); |
1251 | } | 1684 | } |
1252 | 1685 | ||
1253 | /** | 1686 | /** |
@@ -1428,7 +1861,7 @@ static void cancel_pending(struct ubi_device *ubi) | |||
1428 | */ | 1861 | */ |
1429 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | 1862 | int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) |
1430 | { | 1863 | { |
1431 | int err, i; | 1864 | int err, i, reserved_pebs, found_pebs = 0; |
1432 | struct rb_node *rb1, *rb2; | 1865 | struct rb_node *rb1, *rb2; |
1433 | struct ubi_ainf_volume *av; | 1866 | struct ubi_ainf_volume *av; |
1434 | struct ubi_ainf_peb *aeb, *tmp; | 1867 | struct ubi_ainf_peb *aeb, *tmp; |
@@ -1440,6 +1873,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1440 | init_rwsem(&ubi->work_sem); | 1873 | init_rwsem(&ubi->work_sem); |
1441 | ubi->max_ec = ai->max_ec; | 1874 | ubi->max_ec = ai->max_ec; |
1442 | INIT_LIST_HEAD(&ubi->works); | 1875 | INIT_LIST_HEAD(&ubi->works); |
1876 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1877 | INIT_WORK(&ubi->fm_work, update_fastmap_work_fn); | ||
1878 | #endif | ||
1443 | 1879 | ||
1444 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); | 1880 | sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); |
1445 | 1881 | ||
@@ -1461,13 +1897,17 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1461 | 1897 | ||
1462 | e->pnum = aeb->pnum; | 1898 | e->pnum = aeb->pnum; |
1463 | e->ec = aeb->ec; | 1899 | e->ec = aeb->ec; |
1900 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
1464 | ubi->lookuptbl[e->pnum] = e; | 1901 | ubi->lookuptbl[e->pnum] = e; |
1465 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { | 1902 | if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { |
1466 | kmem_cache_free(ubi_wl_entry_slab, e); | 1903 | kmem_cache_free(ubi_wl_entry_slab, e); |
1467 | goto out_free; | 1904 | goto out_free; |
1468 | } | 1905 | } |
1906 | |||
1907 | found_pebs++; | ||
1469 | } | 1908 | } |
1470 | 1909 | ||
1910 | ubi->free_count = 0; | ||
1471 | list_for_each_entry(aeb, &ai->free, u.list) { | 1911 | list_for_each_entry(aeb, &ai->free, u.list) { |
1472 | cond_resched(); | 1912 | cond_resched(); |
1473 | 1913 | ||
@@ -1478,8 +1918,14 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1478 | e->pnum = aeb->pnum; | 1918 | e->pnum = aeb->pnum; |
1479 | e->ec = aeb->ec; | 1919 | e->ec = aeb->ec; |
1480 | ubi_assert(e->ec >= 0); | 1920 | ubi_assert(e->ec >= 0); |
1921 | ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); | ||
1922 | |||
1481 | wl_tree_add(e, &ubi->free); | 1923 | wl_tree_add(e, &ubi->free); |
1924 | ubi->free_count++; | ||
1925 | |||
1482 | ubi->lookuptbl[e->pnum] = e; | 1926 | ubi->lookuptbl[e->pnum] = e; |
1927 | |||
1928 | found_pebs++; | ||
1483 | } | 1929 | } |
1484 | 1930 | ||
1485 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { | 1931 | ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { |
@@ -1493,6 +1939,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1493 | e->pnum = aeb->pnum; | 1939 | e->pnum = aeb->pnum; |
1494 | e->ec = aeb->ec; | 1940 | e->ec = aeb->ec; |
1495 | ubi->lookuptbl[e->pnum] = e; | 1941 | ubi->lookuptbl[e->pnum] = e; |
1942 | |||
1496 | if (!aeb->scrub) { | 1943 | if (!aeb->scrub) { |
1497 | dbg_wl("add PEB %d EC %d to the used tree", | 1944 | dbg_wl("add PEB %d EC %d to the used tree", |
1498 | e->pnum, e->ec); | 1945 | e->pnum, e->ec); |
@@ -1502,22 +1949,38 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) | |||
1502 | e->pnum, e->ec); | 1949 | e->pnum, e->ec); |
1503 | wl_tree_add(e, &ubi->scrub); | 1950 | wl_tree_add(e, &ubi->scrub); |
1504 | } | 1951 | } |
1952 | |||
1953 | found_pebs++; | ||
1505 | } | 1954 | } |
1506 | } | 1955 | } |
1507 | 1956 | ||
1508 | if (ubi->avail_pebs < WL_RESERVED_PEBS) { | 1957 | dbg_wl("found %i PEBs", found_pebs); |
1958 | |||
1959 | if (ubi->fm) | ||
1960 | ubi_assert(ubi->good_peb_count == \ | ||
1961 | found_pebs + ubi->fm->used_blocks); | ||
1962 | else | ||
1963 | ubi_assert(ubi->good_peb_count == found_pebs); | ||
1964 | |||
1965 | reserved_pebs = WL_RESERVED_PEBS; | ||
1966 | #ifdef CONFIG_MTD_UBI_FASTMAP | ||
1967 | /* Reserve enough LEBs to store two fastmaps. */ | ||
1968 | reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2; | ||
1969 | #endif | ||
1970 | |||
1971 | if (ubi->avail_pebs < reserved_pebs) { | ||
1509 | ubi_err("no enough physical eraseblocks (%d, need %d)", | 1972 | ubi_err("no enough physical eraseblocks (%d, need %d)", |
1510 | ubi->avail_pebs, WL_RESERVED_PEBS); | 1973 | ubi->avail_pebs, reserved_pebs); |
1511 | if (ubi->corr_peb_count) | 1974 | if (ubi->corr_peb_count) |
1512 | ubi_err("%d PEBs are corrupted and not used", | 1975 | ubi_err("%d PEBs are corrupted and not used", |
1513 | ubi->corr_peb_count); | 1976 | ubi->corr_peb_count); |
1514 | goto out_free; | 1977 | goto out_free; |
1515 | } | 1978 | } |
1516 | ubi->avail_pebs -= WL_RESERVED_PEBS; | 1979 | ubi->avail_pebs -= reserved_pebs; |
1517 | ubi->rsvd_pebs += WL_RESERVED_PEBS; | 1980 | ubi->rsvd_pebs += reserved_pebs; |
1518 | 1981 | ||
1519 | /* Schedule wear-leveling if needed */ | 1982 | /* Schedule wear-leveling if needed */ |
1520 | err = ensure_wear_leveling(ubi); | 1983 | err = ensure_wear_leveling(ubi, 0); |
1521 | if (err) | 1984 | if (err) |
1522 | goto out_free; | 1985 | goto out_free; |
1523 | 1986 | ||
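The extra reservation is easy to quantify: UBI now keeps room for two complete fastmaps (the current one plus the one being written), each spanning fm_size / leb_size eraseblocks. A worked example with invented geometry, assuming ubi_calc_fm_size() rounds @fm_size up to a LEB multiple so the division is exact:

        size_t leb_size = 128 * 1024;          /* 128 KiB logical eraseblocks */
        size_t fm_size = 256 * 1024;           /* fastmap needs two eraseblocks */
        int extra = (fm_size / leb_size) * 2;  /* 4 PEBs on top of WL_RESERVED_PEBS */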
@@ -1596,7 +2059,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) | |||
1596 | } | 2059 | } |
1597 | 2060 | ||
1598 | read_ec = be64_to_cpu(ec_hdr->ec); | 2061 | read_ec = be64_to_cpu(ec_hdr->ec); |
1599 | if (ec != read_ec) { | 2062 | if (ec != read_ec && read_ec - ec > 1) { |
1600 | ubi_err("self-check failed for PEB %d", pnum); | 2063 | ubi_err("self-check failed for PEB %d", pnum); |
1601 | ubi_err("read EC is %lld, should be %d", read_ec, ec); | 2064 | ubi_err("read EC is %lld, should be %d", read_ec, ec); |
1602 | dump_stack(); | 2065 | dump_stack(); |