Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--	drivers/mtd/ubi/wl.c	136
1 file changed, 47 insertions(+), 89 deletions(-)
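
In outline, the diff below replaces the trivial RB-tree wrappers in wl.c (tree_empty() and the *_tree_add()/*_tree_del() helpers) with open-coded checks of root->rb_node and direct wl_tree_add()/rb_erase() calls, adds an error message when an erase fails, and switches allocations on these paths from GFP_KERNEL to GFP_NOFS. A minimal userspace sketch of the empty-check half of that pattern follows; it is illustration only, with stand-in types that are not the kernel's rbtree and no code taken from the patch itself:

/*
 * Illustrative sketch only -- not part of the patch.  A one-line wrapper
 * such as tree_empty() is removed and callers test root->rb_node directly.
 */
#include <stdio.h>
#include <stddef.h>

struct rb_node { struct rb_node *rb_left, *rb_right; };	/* stand-in type */
struct rb_root { struct rb_node *rb_node; };		/* stand-in type */

/* Old style: a helper that only hides a NULL check. */
static inline int tree_empty(struct rb_root *root)
{
	return root->rb_node == NULL;
}

int main(void)
{
	struct rb_root free_root = { NULL };

	if (tree_empty(&free_root))	/* before: check via the wrapper */
		puts("free tree empty (wrapper)");

	if (!free_root.rb_node)		/* after: check spelled out at the call site */
		puts("free tree empty (direct check)");

	return 0;
}
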
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a5a9b8d8730..a4f1bf33164 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -208,7 +208,7 @@ struct ubi_work {
 };
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
 #else
@@ -220,17 +220,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 static struct kmem_cache *wl_entries_slab;
 
 /**
- * tree_empty - a helper function to check if an RB-tree is empty.
- * @root: the root of the tree
- *
- * This function returns non-zero if the RB-tree is empty and zero if not.
- */
-static inline int tree_empty(struct rb_root *root)
-{
-	return root->rb_node == NULL;
-}
-
-/**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
  * @root: the root of the tree
@@ -266,45 +255,6 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
 	rb_insert_color(&e->rb, root);
 }
 
-
-/*
- * Helper functions to add and delete wear-leveling entries from different
- * trees.
- */
-
-static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
-{
-	wl_tree_add(e, &ubi->free);
-}
-static inline void used_tree_add(struct ubi_device *ubi,
-				 struct ubi_wl_entry *e)
-{
-	wl_tree_add(e, &ubi->used);
-}
-static inline void scrub_tree_add(struct ubi_device *ubi,
-				  struct ubi_wl_entry *e)
-{
-	wl_tree_add(e, &ubi->scrub);
-}
-static inline void free_tree_del(struct ubi_device *ubi,
-				 struct ubi_wl_entry *e)
-{
-	paranoid_check_in_wl_tree(e, &ubi->free);
-	rb_erase(&e->rb, &ubi->free);
-}
-static inline void used_tree_del(struct ubi_device *ubi,
-				 struct ubi_wl_entry *e)
-{
-	paranoid_check_in_wl_tree(e, &ubi->used);
-	rb_erase(&e->rb, &ubi->used);
-}
-static inline void scrub_tree_del(struct ubi_device *ubi,
-				  struct ubi_wl_entry *e)
-{
-	paranoid_check_in_wl_tree(e, &ubi->scrub);
-	rb_erase(&e->rb, &ubi->scrub);
-}
-
 /**
  * do_work - do one pending work.
  * @ubi: UBI device description object
@@ -358,7 +308,7 @@ static int produce_free_peb(struct ubi_device *ubi)
 	int err;
 
 	spin_lock(&ubi->wl_lock);
-	while (tree_empty(&ubi->free)) {
+	while (!ubi->free.rb_node) {
 		spin_unlock(&ubi->wl_lock);
 
 		dbg_wl("do one work synchronously");
@@ -508,13 +458,13 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
 	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
 		   dtype == UBI_UNKNOWN);
 
-	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
+	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
 	if (!pe)
 		return -ENOMEM;
 
 retry:
 	spin_lock(&ubi->wl_lock);
-	if (tree_empty(&ubi->free)) {
+	if (!ubi->free.rb_node) {
 		if (ubi->works_count == 0) {
 			ubi_assert(list_empty(&ubi->works));
 			ubi_err("no free eraseblocks");
@@ -585,7 +535,8 @@ retry:
 	 * Move the physical eraseblock to the protection trees where it will
 	 * be protected from being moved for some time.
 	 */
-	free_tree_del(ubi, e);
+	paranoid_check_in_wl_tree(e, &ubi->free);
+	rb_erase(&e->rb, &ubi->free);
 	prot_tree_add(ubi, e, pe, protect);
 
 	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
@@ -645,7 +596,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
 	if (err > 0)
 		return -EINVAL;
 
-	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
 
@@ -704,7 +655,7 @@ static void check_protection_over(struct ubi_device *ubi)
 	 */
 	while (1) {
 		spin_lock(&ubi->wl_lock);
-		if (tree_empty(&ubi->prot.aec)) {
+		if (!ubi->prot.aec.rb_node) {
 			spin_unlock(&ubi->wl_lock);
 			break;
 		}
@@ -721,7 +672,7 @@ static void check_protection_over(struct ubi_device *ubi)
 		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
 		rb_erase(&pe->rb_aec, &ubi->prot.aec);
 		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-		used_tree_add(ubi, pe->e);
+		wl_tree_add(pe->e, &ubi->used);
 		spin_unlock(&ubi->wl_lock);
 
 		kfree(pe);
@@ -768,7 +719,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
 	       e->pnum, e->ec, torture);
 
-	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
 	if (!wl_wrk)
 		return -ENOMEM;
 
@@ -802,7 +753,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	if (cancel)
 		return 0;
 
-	vid_hdr = ubi_zalloc_vid_hdr(ubi);
+	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
 		return -ENOMEM;
 
@@ -812,8 +763,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * Only one WL worker at a time is supported at this implementation, so
 	 * make sure a PEB is not being moved already.
 	 */
-	if (ubi->move_to || tree_empty(&ubi->free) ||
-	    (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
+	if (ubi->move_to || !ubi->free.rb_node ||
+	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
 		 * Only one WL worker at a time is supported at this
 		 * implementation, so if a LEB is already being moved, cancel.
@@ -828,14 +779,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * triggered again.
 		 */
 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
-		       tree_empty(&ubi->free), tree_empty(&ubi->used));
+		       !ubi->free.rb_node, !ubi->used.rb_node);
 		ubi->wl_scheduled = 0;
 		spin_unlock(&ubi->wl_lock);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return 0;
 	}
 
-	if (tree_empty(&ubi->scrub)) {
+	if (!ubi->scrub.rb_node) {
 		/*
 		 * Now pick the least worn-out used physical eraseblock and a
 		 * highly worn-out free physical eraseblock. If the erase
@@ -852,17 +803,20 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			ubi_free_vid_hdr(ubi, vid_hdr);
 			return 0;
 		}
-		used_tree_del(ubi, e1);
+		paranoid_check_in_wl_tree(e1, &ubi->used);
+		rb_erase(&e1->rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-		scrub_tree_del(ubi, e1);
+		paranoid_check_in_wl_tree(e1, &ubi->scrub);
+		rb_erase(&e1->rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
-	free_tree_del(ubi, e2);
+	paranoid_check_in_wl_tree(e2, &ubi->free);
+	rb_erase(&e2->rb, &ubi->free);
 	ubi_assert(!ubi->move_from && !ubi->move_to);
 	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
 	ubi->move_from = e1;
@@ -908,7 +862,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
 	if (!ubi->move_to_put)
-		used_tree_add(ubi, e2);
+		wl_tree_add(e2, &ubi->used);
 	else
 		put = 1;
 	ubi->move_from = ubi->move_to = NULL;
@@ -953,7 +907,7 @@ error:
 	if (ubi->move_from_put)
 		put = 1;
 	else
-		used_tree_add(ubi, e1);
+		wl_tree_add(e1, &ubi->used);
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_from_put = ubi->move_to_put = 0;
 	spin_unlock(&ubi->wl_lock);
@@ -1005,8 +959,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
 	 * the WL worker has to be scheduled anyway.
 	 */
-	if (tree_empty(&ubi->scrub)) {
-		if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
+	if (!ubi->scrub.rb_node) {
+		if (!ubi->used.rb_node || !ubi->free.rb_node)
 			/* No physical eraseblocks - no deal */
 			goto out_unlock;
 
@@ -1028,7 +982,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
 	ubi->wl_scheduled = 1;
 	spin_unlock(&ubi->wl_lock);
 
-	wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
+	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
 	if (!wrk) {
 		err = -ENOMEM;
 		goto out_cancel;
@@ -1079,7 +1033,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 		spin_lock(&ubi->wl_lock);
 		ubi->abs_ec += 1;
-		free_tree_add(ubi, e);
+		wl_tree_add(e, &ubi->free);
 		spin_unlock(&ubi->wl_lock);
 
 		/*
@@ -1093,6 +1047,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 		return err;
 	}
 
+	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
 	kmem_cache_free(wl_entries_slab, e);
 
@@ -1211,11 +1166,13 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		spin_unlock(&ubi->wl_lock);
 		return 0;
 	} else {
-		if (in_wl_tree(e, &ubi->used))
-			used_tree_del(ubi, e);
-		else if (in_wl_tree(e, &ubi->scrub))
-			scrub_tree_del(ubi, e);
-		else
+		if (in_wl_tree(e, &ubi->used)) {
+			paranoid_check_in_wl_tree(e, &ubi->used);
+			rb_erase(&e->rb, &ubi->used);
+		} else if (in_wl_tree(e, &ubi->scrub)) {
+			paranoid_check_in_wl_tree(e, &ubi->scrub);
+			rb_erase(&e->rb, &ubi->scrub);
+		} else
 			prot_tree_del(ubi, e->pnum);
 	}
 	spin_unlock(&ubi->wl_lock);
@@ -1223,7 +1180,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 	err = schedule_erase(ubi, e, torture);
 	if (err) {
 		spin_lock(&ubi->wl_lock);
-		used_tree_add(ubi, e);
+		wl_tree_add(e, &ubi->used);
 		spin_unlock(&ubi->wl_lock);
 	}
 
@@ -1267,12 +1224,13 @@ retry:
 		goto retry;
 	}
 
-	if (in_wl_tree(e, &ubi->used))
-		used_tree_del(ubi, e);
-	else
+	if (in_wl_tree(e, &ubi->used)) {
+		paranoid_check_in_wl_tree(e, &ubi->used);
+		rb_erase(&e->rb, &ubi->used);
+	} else
 		prot_tree_del(ubi, pnum);
 
-	scrub_tree_add(ubi, e);
+	wl_tree_add(e, &ubi->scrub);
 	spin_unlock(&ubi->wl_lock);
 
 	/*
@@ -1488,7 +1446,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->pnum = seb->pnum;
 		e->ec = seb->ec;
 		ubi_assert(e->ec >= 0);
-		free_tree_add(ubi, e);
+		wl_tree_add(e, &ubi->free);
 		ubi->lookuptbl[e->pnum] = e;
 	}
 
@@ -1522,16 +1480,16 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 			if (!seb->scrub) {
 				dbg_wl("add PEB %d EC %d to the used tree",
 				       e->pnum, e->ec);
-				used_tree_add(ubi, e);
+				wl_tree_add(e, &ubi->used);
 			} else {
 				dbg_wl("add PEB %d EC %d to the scrub tree",
 				       e->pnum, e->ec);
-				scrub_tree_add(ubi, e);
+				wl_tree_add(e, &ubi->scrub);
 			}
 		}
 	}
 
-	if (WL_RESERVED_PEBS > ubi->avail_pebs) {
+	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
 		ubi_err("no enough physical eraseblocks (%d, need %d)",
 			ubi->avail_pebs, WL_RESERVED_PEBS);
 		goto out_free;
@@ -1624,13 +1582,13 @@ void ubi_wl_close(struct ubi_device *ubi)
  * is equivalent to @ec, %1 if not, and a negative error code if an error
  * occurred.
  */
-static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
+static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
 {
 	int err;
 	long long read_ec;
 	struct ubi_ec_hdr *ec_hdr;
 
-	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
 	if (!ec_hdr)
 		return -ENOMEM;
 
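
The other recurring change in the hunks above is the switch from GFP_KERNEL to GFP_NOFS for the kmalloc(), kzalloc() and ubi_zalloc_vid_hdr() calls on the wear-leveling paths. A kernel-style sketch of the distinction follows; it is not a standalone program and not code from wl.c, and wl_alloc_example() is a hypothetical helper used only for illustration:

/*
 * Sketch only.  GFP_NOFS lets the allocation sleep but keeps direct memory
 * reclaim from calling back into filesystem code, which is presumably the
 * point here: a filesystem sitting on top of UBI may itself be blocked on
 * this wear-leveling work, so reclaiming through it could deadlock.
 */
#include <linux/slab.h>

static void *wl_alloc_example(size_t size)
{
	/* GFP_KERNEL would allow reclaim to write back through the FS. */
	return kmalloc(size, GFP_NOFS);
}
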