Diffstat (limited to 'drivers/mtd/ubi/wl.c')
 drivers/mtd/ubi/wl.c | 338 ++++++++++++++++++++++++++++--------------------------
 1 file changed, 184 insertions(+), 154 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..a471a491f0ab 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
 #define WL_MAX_FAILURES 32
 
 /**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-	struct rb_node rb;
-	int ec;
-	int pnum;
-};
-
-/**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
  * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
 	int err;
 	struct ubi_work *wrk;
 
-	spin_lock(&ubi->wl_lock);
+	cond_resched();
 
+	/*
+	 * @ubi->work_sem is used to synchronize with the workers. Workers take
+	 * it in read mode, so many of them may be doing works at a time. But
+	 * the queue flush code has to be sure the whole queue of works is
+	 * done, and it takes the mutex in write mode.
+	 */
+	down_read(&ubi->work_sem);
+	spin_lock(&ubi->wl_lock);
 	if (list_empty(&ubi->works)) {
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 		return 0;
 	}
 
 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
 	list_del(&wrk->list);
+	ubi->works_count -= 1;
+	ubi_assert(ubi->works_count >= 0);
 	spin_unlock(&ubi->wl_lock);
 
 	/*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
 	err = wrk->func(ubi, wrk, 0);
 	if (err)
 		ubi_err("work failed with error code %d", err);
+	up_read(&ubi->work_sem);
 
-	spin_lock(&ubi->wl_lock);
-	ubi->works_count -= 1;
-	ubi_assert(ubi->works_count >= 0);
-	spin_unlock(&ubi->wl_lock);
 	return err;
 }
 
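
The reworked do_work() above relies on @ubi->work_sem as a "many workers, one flusher" barrier: each worker holds the semaphore in read mode while it executes one work item, and anyone who later takes it in write mode is therefore guaranteed that every item that had already been dequeued has finished. A minimal stand-alone sketch of the same pattern (work_list, queue_lock and flush_barrier are illustrative names, not UBI symbols):

#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static DECLARE_RWSEM(work_sem);		/* read: workers, write: flush barrier */
static DEFINE_SPINLOCK(queue_lock);	/* protects work_list */
static LIST_HEAD(work_list);

struct work_item {
	struct list_head list;
	int (*func)(struct work_item *w);
};

/* Worker side: dequeue one item and run it outside the spinlock. */
static int run_one_work(void)
{
	struct work_item *w;
	int err;

	down_read(&work_sem);			/* many workers may run in parallel */
	spin_lock(&queue_lock);
	if (list_empty(&work_list)) {
		spin_unlock(&queue_lock);
		up_read(&work_sem);
		return 0;
	}
	w = list_entry(work_list.next, struct work_item, list);
	list_del(&w->list);
	spin_unlock(&queue_lock);

	err = w->func(w);			/* may sleep; only the read lock is held */
	up_read(&work_sem);
	return err;
}

/* Flusher side: after draining the queue, wait for in-flight workers. */
static void flush_barrier(void)
{
	down_write(&work_sem);			/* blocks until all readers are done */
	up_write(&work_sem);
}
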
@@ -549,8 +539,12 @@ retry:
  * prot_tree_del - remove a physical eraseblock from the protection trees
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
+ *
+ * This function returns PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
  */
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
 {
 	struct rb_node *p;
 	struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
 		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
 
 		if (pnum == pe->e->pnum)
-			break;
+			goto found;
 
 		if (pnum < pe->e->pnum)
 			p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
 			p = p->rb_right;
 	}
 
+	return -ENODEV;
+
+found:
 	ubi_assert(pe->e->pnum == pnum);
 	rb_erase(&pe->rb_aec, &ubi->prot.aec);
 	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
 	kfree(pe);
+	return 0;
 }
 
 /**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, put = 0;
+	int err, put = 0, scrubbing = 0, protect = 0;
+	struct ubi_wl_prot_entry *uninitialized_var(pe);
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
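
The new @pe local is declared through uninitialized_var(), the old kernel annotation for suppressing a false "may be used uninitialized" warning: @pe is only dereferenced on the path that assigned it, but the compiler cannot prove that. On GCC the macro expanded to roughly the self-assignment below (a sketch from memory of the compiler headers of that era, not a quote of this file):

/* approximately what linux/compiler-gcc*.h provided at the time */
#define uninitialized_var(x) x = x

/* so the declaration in the hunk above is effectively: */
struct ubi_wl_prot_entry *pe = pe;	/* self-init only silences the warning */
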
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	if (!vid_hdr)
 		return -ENOMEM;
 
+	mutex_lock(&ubi->move_mutex);
 	spin_lock(&ubi->wl_lock);
+	ubi_assert(!ubi->move_from && !ubi->move_to);
+	ubi_assert(!ubi->move_to_put);
 
-	/*
-	 * Only one WL worker at a time is supported at this implementation, so
-	 * make sure a PEB is not being moved already.
-	 */
-	if (ubi->move_to || !ubi->free.rb_node ||
+	if (!ubi->free.rb_node ||
 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
-		 * Only one WL worker at a time is supported at this
-		 * implementation, so if a LEB is already being moved, cancel.
-		 *
-		 * No free physical eraseblocks? Well, we cancel wear-leveling
-		 * then. It will be triggered again when a free physical
-		 * eraseblock appears.
+		 * No free physical eraseblocks? Well, they must be waiting in
+		 * the queue to be erased. Cancel movement - it will be
+		 * triggered again when a free physical eraseblock appears.
 		 *
 		 * No used physical eraseblocks? They must be temporarily
 		 * protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
 		       !ubi->free.rb_node, !ubi->used.rb_node);
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return 0;
+		goto out_cancel;
 	}
 
 	if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
 			       e1->ec, e2->ec);
-			ubi->wl_scheduled = 0;
-			spin_unlock(&ubi->wl_lock);
-			ubi_free_vid_hdr(ubi, vid_hdr);
-			return 0;
+			goto out_cancel;
 		}
 		paranoid_check_in_wl_tree(e1, &ubi->used);
 		rb_erase(&e1->rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
+		/* Perform scrubbing */
+		scrubbing = 1;
 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
 		rb_erase(&e1->rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
 	paranoid_check_in_wl_tree(e2, &ubi->free);
 	rb_erase(&e2->rb, &ubi->free);
-	ubi_assert(!ubi->move_from && !ubi->move_to);
-	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
 	ubi->move_from = e1;
 	ubi->move_to = e2;
 	spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * We so far do not know which logical eraseblock our physical
 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
 	 * header first.
+	 *
+	 * Note, we are protected from this PEB being unmapped and erased. The
+	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+	 * which is being moved was unmapped.
 	 */
 
 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			 * likely have the VID header in place.
 			 */
 			dbg_wl("PEB %d has no VID header", e1->pnum);
-			err = 0;
-		} else {
-			ubi_err("error %d while reading VID header from PEB %d",
-				err, e1->pnum);
-			if (err > 0)
-				err = -EIO;
+			goto out_not_moved;
 		}
-		goto error;
+
+		ubi_err("error %d while reading VID header from PEB %d",
+			err, e1->pnum);
+		if (err > 0)
+			err = -EIO;
+		goto out_error;
 	}
 
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
-		if (err == UBI_IO_BITFLIPS)
-			err = 0;
-		goto error;
+
+		if (err < 0)
+			goto out_error;
+		if (err == 1)
+			goto out_not_moved;
+
+		/*
+		 * For some reason the LEB was not moved - it might be because
+		 * the volume is being deleted. We should prevent this PEB from
+		 * being selected for wear-levelling movement for some "time",
+		 * so put it to the protection tree.
+		 */
+
+		dbg_wl("cancelled moving PEB %d", e1->pnum);
+		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+		if (!pe) {
+			err = -ENOMEM;
+			goto out_error;
+		}
+
+		protect = 1;
 	}
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
+	if (protect)
+		prot_tree_add(ubi, e1, pe, protect);
 	if (!ubi->move_to_put)
 		wl_tree_add(e2, &ubi->used);
 	else
 		put = 1;
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_from_put = ubi->move_to_put = 0;
-	ubi->wl_scheduled = 0;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
 	if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
-		if (err) {
-			kmem_cache_free(wl_entries_slab, e2);
-			ubi_ro_mode(ubi);
-		}
+		if (err)
+			goto out_error;
 	}
 
-	err = schedule_erase(ubi, e1, 0);
-	if (err) {
-		kmem_cache_free(wl_entries_slab, e1);
-		ubi_ro_mode(ubi);
+	if (!protect) {
+		err = schedule_erase(ubi, e1, 0);
+		if (err)
+			goto out_error;
 	}
 
+
 	dbg_wl("done");
-	return err;
+	mutex_unlock(&ubi->move_mutex);
+	return 0;
 
 	/*
-	 * Some error occurred. @e1 was not changed, so return it back. @e2
-	 * might be changed, schedule it for erasure.
+	 * For some reasons the LEB was not moved, might be an error, might be
+	 * something else. @e1 was not changed, so return it back. @e2 might
+	 * be changed, schedule it for erasure.
 	 */
-error:
-	if (err)
-		dbg_wl("error %d occurred, cancel operation", err);
-	ubi_assert(err <= 0);
-
+out_not_moved:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
-	ubi->wl_scheduled = 0;
-	if (ubi->move_from_put)
-		put = 1;
+	if (scrubbing)
+		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
 	ubi->move_from = ubi->move_to = NULL;
-	ubi->move_from_put = ubi->move_to_put = 0;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	if (put) {
-		/*
-		 * Well, the target PEB was put meanwhile, schedule it for
-		 * erasure.
-		 */
-		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
-		err = schedule_erase(ubi, e1, 0);
-		if (err) {
-			kmem_cache_free(wl_entries_slab, e1);
-			ubi_ro_mode(ubi);
-		}
-	}
-
 	err = schedule_erase(ubi, e2, 0);
-	if (err) {
-		kmem_cache_free(wl_entries_slab, e2);
-		ubi_ro_mode(ubi);
-	}
+	if (err)
+		goto out_error;
+
+	mutex_unlock(&ubi->move_mutex);
+	return 0;
+
+out_error:
+	ubi_err("error %d while moving PEB %d to PEB %d",
+		err, e1->pnum, e2->pnum);
 
-	yield();
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	spin_lock(&ubi->wl_lock);
+	ubi->move_from = ubi->move_to = NULL;
+	ubi->move_to_put = ubi->wl_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+
+	kmem_cache_free(ubi_wl_entry_slab, e1);
+	kmem_cache_free(ubi_wl_entry_slab, e2);
+	ubi_ro_mode(ubi);
+
+	mutex_unlock(&ubi->move_mutex);
 	return err;
+
+out_cancel:
+	ubi->wl_scheduled = 0;
+	spin_unlock(&ubi->wl_lock);
+	mutex_unlock(&ubi->move_mutex);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return 0;
 }
 
 /**
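
After the rewrite, wear_leveling_worker() follows a single locking discipline on every path: @ubi->move_mutex is held for the entire move, @ubi->wl_lock only around tree and state updates, and each of the out_not_moved/out_error/out_cancel labels unlocks in the same order. A stripped-down skeleton of that control flow, with pick_pebs() and copy_peb() as stand-in helpers rather than real UBI calls:

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(move_mutex);	/* held for the whole move operation */
static DEFINE_SPINLOCK(wl_lock);	/* protects the state, never held while sleeping */

static int pick_pebs(void) { return 1; }	/* placeholder: choose source/target PEBs */
static int copy_peb(void) { return 0; }		/* placeholder: sleeping data copy */

static int do_move(void)
{
	int err;

	mutex_lock(&move_mutex);
	spin_lock(&wl_lock);
	if (!pick_pebs()) {
		spin_unlock(&wl_lock);
		goto out_cancel;	/* nothing to do, not an error */
	}
	spin_unlock(&wl_lock);	/* drop the spinlock before the sleeping copy */

	err = copy_peb();
	if (err)
		goto out_error;

	mutex_unlock(&move_mutex);
	return 0;

out_error:
	mutex_unlock(&move_mutex);
	return err;

out_cancel:
	mutex_unlock(&move_mutex);
	return 0;
}
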
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(wl_entries_slab, e);
+		kmem_cache_free(ubi_wl_entry_slab, e);
 		return 0;
 	}
 
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(wl_entries_slab, e);
+	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
 }
 
 /**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
  * @ubi: UBI device description object
  * @pnum: physical eraseblock to return
  * @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
  * This function is called to return physical eraseblock @pnum to the pool of
  * free physical eraseblocks. The @torture flag has to be set if an I/O error
  * occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
  */
 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 {
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 	ubi_assert(pnum >= 0);
 	ubi_assert(pnum < ubi->peb_count);
 
+retry:
 	spin_lock(&ubi->wl_lock);
-
 	e = ubi->lookuptbl[pnum];
 	if (e == ubi->move_from) {
 		/*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		 * be moved. It will be scheduled for erasure in the
 		 * wear-leveling worker.
 		 */
-		dbg_wl("PEB %d is being moved", pnum);
-		ubi_assert(!ubi->move_from_put);
-		ubi->move_from_put = 1;
+		dbg_wl("PEB %d is being moved, wait", pnum);
 		spin_unlock(&ubi->wl_lock);
-		return 0;
+
+		/* Wait for the WL worker by taking the @ubi->move_mutex */
+		mutex_lock(&ubi->move_mutex);
+		mutex_unlock(&ubi->move_mutex);
+		goto retry;
 	} else if (e == ubi->move_to) {
 		/*
 		 * User is putting the physical eraseblock which was selected
 		 * as the target the data is moved to. It may happen if the EBA
-		 * unit already re-mapped the LEB but the WL unit did has not
-		 * put the PEB to the "used" tree.
+		 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
+		 * the WL unit has not put the PEB to the "used" tree yet, but
+		 * it is about to do this. So we just set a flag which will
+		 * tell the WL worker that the PEB is not needed anymore and
+		 * should be scheduled for erasure.
 		 */
 		dbg_wl("PEB %d is the target of data moving", pnum);
 		ubi_assert(!ubi->move_to_put);
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		} else if (in_wl_tree(e, &ubi->scrub)) {
 			paranoid_check_in_wl_tree(e, &ubi->scrub);
 			rb_erase(&e->rb, &ubi->scrub);
-		} else
-			prot_tree_del(ubi, e->pnum);
+		} else {
+			err = prot_tree_del(ubi, e->pnum);
+			if (err) {
+				ubi_err("PEB %d not found", pnum);
+				ubi_ro_mode(ubi);
+				spin_unlock(&ubi->wl_lock);
+				return err;
+			}
+		}
 	}
 	spin_unlock(&ubi->wl_lock);
 
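
The retry loop in ubi_wl_put_peb() is a common kernel idiom for "wait until the other side finishes, then look again": instead of handing over a flag (the old @move_from_put), the caller briefly takes and releases the mutex that the mover holds for the whole operation, which sleeps exactly until the move completes, and then re-reads the state. A minimal sketch of the pattern with illustrative names (busy, state_lock, op_mutex):

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(op_mutex);		/* held by the mover for the whole operation */
static DEFINE_SPINLOCK(state_lock);	/* protects @busy */
static int busy;			/* non-zero while a move is in flight */

static void put_resource(void)
{
retry:
	spin_lock(&state_lock);
	if (busy) {
		spin_unlock(&state_lock);
		/* Sleep until the holder drops op_mutex, then re-check the state. */
		mutex_lock(&op_mutex);
		mutex_unlock(&op_mutex);
		goto retry;
	}
	/* ... the resource is idle, handle it under state_lock ... */
	spin_unlock(&state_lock);
}
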
@@ -1227,8 +1255,17 @@ retry:
 	if (in_wl_tree(e, &ubi->used)) {
 		paranoid_check_in_wl_tree(e, &ubi->used);
 		rb_erase(&e->rb, &ubi->used);
-	} else
-		prot_tree_del(ubi, pnum);
+	} else {
+		int err;
+
+		err = prot_tree_del(ubi, e->pnum);
+		if (err) {
+			ubi_err("PEB %d not found", pnum);
+			ubi_ro_mode(ubi);
+			spin_unlock(&ubi->wl_lock);
+			return err;
+		}
+	}
 
 	wl_tree_add(e, &ubi->scrub);
 	spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,32 @@ retry:
  */
 int ubi_wl_flush(struct ubi_device *ubi)
 {
-	int err, pending_count;
-
-	pending_count = ubi->works_count;
-
-	dbg_wl("flush (%d pending works)", pending_count);
+	int err;
 
 	/*
 	 * Erase while the pending works queue is not empty, but not more then
 	 * the number of currently pending works.
 	 */
-	while (pending_count-- > 0) {
+	dbg_wl("flush (%d pending works)", ubi->works_count);
+	while (ubi->works_count) {
+		err = do_work(ubi);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Make sure all the works which have been done in parallel are
+	 * finished.
+	 */
+	down_write(&ubi->work_sem);
+	up_write(&ubi->work_sem);
+
+	/*
+	 * And in case last was the WL worker and it cancelled the LEB
+	 * movement, flush again.
+	 */
+	while (ubi->works_count) {
+		dbg_wl("flush more (%d pending works)", ubi->works_count);
 		err = do_work(ubi);
 		if (err)
 			return err;
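
ubi_wl_flush() now drains in three steps: run queued works until @works_count reaches zero, do a write-lock/unlock of @ubi->work_sem as a barrier for works still executing in other threads, and drain once more in case one of those workers re-queued something (for example the WL worker cancelling a move). Condensed, the sequence in the hunk is (assuming the ubi_device fields and the static do_work() used above):

static int flush_all_work(struct ubi_device *ubi)
{
	int err;

	while (ubi->works_count) {		/* 1: drain the visible queue */
		err = do_work(ubi);
		if (err)
			return err;
	}

	down_write(&ubi->work_sem);		/* 2: wait for works running in parallel */
	up_write(&ubi->work_sem);

	while (ubi->works_count) {		/* 3: drain anything re-queued meanwhile */
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}
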
@@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root)
  * ubi_thread - UBI background thread.
  * @u: the UBI device description object pointer
  */
-static int ubi_thread(void *u)
+int ubi_thread(void *u)
 {
 	int failures = 0;
 	struct ubi_device *ubi = u;
@@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
 	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
+	mutex_init(&ubi->move_mutex);
+	init_rwsem(&ubi->work_sem);
 	ubi->max_ec = si->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
 
 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
 
-	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
-	if (IS_ERR(ubi->bgt_thread)) {
-		err = PTR_ERR(ubi->bgt_thread);
-		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
-			err);
-		return err;
-	}
-
-	if (ubi_devices_cnt == 0) {
-		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-						    sizeof(struct ubi_wl_entry),
-						    0, 0, NULL);
-		if (!wl_entries_slab)
-			return -ENOMEM;
-	}
-
 	err = -ENOMEM;
 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
 	if (!ubi->lookuptbl)
-		goto out_free;
+		return err;
 
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->free, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	list_for_each_entry(seb, &si->corr, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
 
-			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
@@ -1510,8 +1548,6 @@ out_free:
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(wl_entries_slab);
 	return err;
 }
 
@@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, pe->e);
+			kmem_cache_free(ubi_wl_entry_slab, pe->e);
 			kfree(pe);
 		}
 	}
@@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
  */
 void ubi_wl_close(struct ubi_device *ubi)
 {
-	dbg_wl("disable \"%s\"", ubi->bgt_name);
-	if (ubi->bgt_thread)
-		kthread_stop(ubi->bgt_thread);
-
 	dbg_wl("close the UBI wear-leveling unit");
 
 	cancel_pending(ubi);
@@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi)
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID