author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-09-13 07:48:20 -0400
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-10-14 06:10:21 -0400
commit    5abde384e350e44e9d0584238b9ee327f2062f93 (patch)
tree      78c80bba9b0f77c3f1d391d9f58ac473bab444e2 /drivers/mtd
parent    e8823bd63d50bb1f9bd73f1197230e1f7217456a (diff)
UBI: remove useless inlines
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
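
The patch drops the tree_empty() inline and the one-line *_tree_add()/*_tree_del() wrappers, open-coding wl_tree_add(), rb_erase() and the "!root->rb_node" emptiness test at each call site. A minimal user-space sketch of the emptiness-check half of that change (the struct definitions and main() below are illustrative stand-ins, not the UBI code):

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct rb_node / struct rb_root. */
struct rb_node { struct rb_node *rb_left, *rb_right; };
struct rb_root { struct rb_node *rb_node; };

/* Before the patch: a one-line inline wrapper, like tree_empty() in wl.c. */
static inline int tree_empty(struct rb_root *root)
{
	return root->rb_node == NULL;
}

int main(void)
{
	struct rb_root free_root = { .rb_node = NULL };

	if (tree_empty(&free_root))     /* old style: call the helper   */
		printf("helper says: empty\n");

	if (!free_root.rb_node)         /* new style: open-coded check  */
		printf("open-coded says: empty\n");

	return 0;
}

The open-coded form is what the hunks below switch to; the wrapper bodies were arguably thin enough that they hid, rather than clarified, what each call site does.
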
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/wl.c  119
1 file changed, 38 insertions, 81 deletions
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6e5315bf5e1b..a4f1bf33164a 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -220,17 +220,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
 static struct kmem_cache *wl_entries_slab;
 
 /**
- * tree_empty - a helper function to check if an RB-tree is empty.
- * @root: the root of the tree
- *
- * This function returns non-zero if the RB-tree is empty and zero if not.
- */
-static inline int tree_empty(struct rb_root *root)
-{
-	return root->rb_node == NULL;
-}
-
-/**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
  * @root: the root of the tree
@@ -266,45 +255,6 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
 	rb_insert_color(&e->rb, root);
 }
 
-
-/*
- * Helper functions to add and delete wear-leveling entries from different
- * trees.
- */
-
-static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
-{
-	wl_tree_add(e, &ubi->free);
-}
-static inline void used_tree_add(struct ubi_device *ubi,
-				 struct ubi_wl_entry *e)
-{
-	wl_tree_add(e, &ubi->used);
-}
-static inline void scrub_tree_add(struct ubi_device *ubi,
-				  struct ubi_wl_entry *e)
-{
-	wl_tree_add(e, &ubi->scrub);
-}
-static inline void free_tree_del(struct ubi_device *ubi,
-				 struct ubi_wl_entry *e)
-{
-	paranoid_check_in_wl_tree(e, &ubi->free);
-	rb_erase(&e->rb, &ubi->free);
-}
-static inline void used_tree_del(struct ubi_device *ubi,
-				 struct ubi_wl_entry *e)
-{
-	paranoid_check_in_wl_tree(e, &ubi->used);
-	rb_erase(&e->rb, &ubi->used);
-}
-static inline void scrub_tree_del(struct ubi_device *ubi,
-				  struct ubi_wl_entry *e)
-{
-	paranoid_check_in_wl_tree(e, &ubi->scrub);
-	rb_erase(&e->rb, &ubi->scrub);
-}
-
 /**
  * do_work - do one pending work.
  * @ubi: UBI device description object
@@ -358,7 +308,7 @@ static int produce_free_peb(struct ubi_device *ubi)
 	int err;
 
 	spin_lock(&ubi->wl_lock);
-	while (tree_empty(&ubi->free)) {
+	while (!ubi->free.rb_node) {
 		spin_unlock(&ubi->wl_lock);
 
 		dbg_wl("do one work synchronously");
@@ -514,7 +464,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
 
 retry:
 	spin_lock(&ubi->wl_lock);
-	if (tree_empty(&ubi->free)) {
+	if (!ubi->free.rb_node) {
 		if (ubi->works_count == 0) {
 			ubi_assert(list_empty(&ubi->works));
 			ubi_err("no free eraseblocks");
@@ -585,7 +535,8 @@ retry:
 	 * Move the physical eraseblock to the protection trees where it will
 	 * be protected from being moved for some time.
 	 */
-	free_tree_del(ubi, e);
+	paranoid_check_in_wl_tree(e, &ubi->free);
+	rb_erase(&e->rb, &ubi->free);
 	prot_tree_add(ubi, e, pe, protect);
 
 	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
@@ -704,7 +655,7 @@ static void check_protection_over(struct ubi_device *ubi)
 	 */
 	while (1) {
 		spin_lock(&ubi->wl_lock);
-		if (tree_empty(&ubi->prot.aec)) {
+		if (!ubi->prot.aec.rb_node) {
 			spin_unlock(&ubi->wl_lock);
 			break;
 		}
@@ -721,7 +672,7 @@ static void check_protection_over(struct ubi_device *ubi)
 		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
 		rb_erase(&pe->rb_aec, &ubi->prot.aec);
 		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-		used_tree_add(ubi, pe->e);
+		wl_tree_add(pe->e, &ubi->used);
 		spin_unlock(&ubi->wl_lock);
 
 		kfree(pe);
@@ -812,8 +763,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * Only one WL worker at a time is supported at this implementation, so
 	 * make sure a PEB is not being moved already.
 	 */
-	if (ubi->move_to || tree_empty(&ubi->free) ||
-	    (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
+	if (ubi->move_to || !ubi->free.rb_node ||
+	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
 		 * Only one WL worker at a time is supported at this
 		 * implementation, so if a LEB is already being moved, cancel.
@@ -828,14 +779,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * triggered again.
 		 */
 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
-		       tree_empty(&ubi->free), tree_empty(&ubi->used));
+		       !ubi->free.rb_node, !ubi->used.rb_node);
 		ubi->wl_scheduled = 0;
 		spin_unlock(&ubi->wl_lock);
 		ubi_free_vid_hdr(ubi, vid_hdr);
 		return 0;
 	}
 
-	if (tree_empty(&ubi->scrub)) {
+	if (!ubi->scrub.rb_node) {
 		/*
 		 * Now pick the least worn-out used physical eraseblock and a
 		 * highly worn-out free physical eraseblock. If the erase
@@ -852,17 +803,20 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			ubi_free_vid_hdr(ubi, vid_hdr);
 			return 0;
 		}
-		used_tree_del(ubi, e1);
+		paranoid_check_in_wl_tree(e1, &ubi->used);
+		rb_erase(&e1->rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-		scrub_tree_del(ubi, e1);
+		paranoid_check_in_wl_tree(e1, &ubi->scrub);
+		rb_erase(&e1->rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
-	free_tree_del(ubi, e2);
+	paranoid_check_in_wl_tree(e2, &ubi->free);
+	rb_erase(&e2->rb, &ubi->free);
 	ubi_assert(!ubi->move_from && !ubi->move_to);
 	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
 	ubi->move_from = e1;
@@ -908,7 +862,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
 	if (!ubi->move_to_put)
-		used_tree_add(ubi, e2);
+		wl_tree_add(e2, &ubi->used);
 	else
 		put = 1;
 	ubi->move_from = ubi->move_to = NULL;
@@ -953,7 +907,7 @@ error:
 	if (ubi->move_from_put)
 		put = 1;
 	else
-		used_tree_add(ubi, e1);
+		wl_tree_add(e1, &ubi->used);
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_from_put = ubi->move_to_put = 0;
 	spin_unlock(&ubi->wl_lock);
@@ -1005,8 +959,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
 	 * the WL worker has to be scheduled anyway.
 	 */
-	if (tree_empty(&ubi->scrub)) {
-		if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
+	if (!ubi->scrub.rb_node) {
+		if (!ubi->used.rb_node || !ubi->free.rb_node)
 			/* No physical eraseblocks - no deal */
 			goto out_unlock;
 
@@ -1079,7 +1033,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
 
 	spin_lock(&ubi->wl_lock);
 	ubi->abs_ec += 1;
-	free_tree_add(ubi, e);
+	wl_tree_add(e, &ubi->free);
 	spin_unlock(&ubi->wl_lock);
 
 	/*
@@ -1212,11 +1166,13 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 		spin_unlock(&ubi->wl_lock);
 		return 0;
 	} else {
-		if (in_wl_tree(e, &ubi->used))
-			used_tree_del(ubi, e);
-		else if (in_wl_tree(e, &ubi->scrub))
-			scrub_tree_del(ubi, e);
-		else
+		if (in_wl_tree(e, &ubi->used)) {
+			paranoid_check_in_wl_tree(e, &ubi->used);
+			rb_erase(&e->rb, &ubi->used);
+		} else if (in_wl_tree(e, &ubi->scrub)) {
+			paranoid_check_in_wl_tree(e, &ubi->scrub);
+			rb_erase(&e->rb, &ubi->scrub);
+		} else
 			prot_tree_del(ubi, e->pnum);
 	}
 	spin_unlock(&ubi->wl_lock);
@@ -1224,7 +1180,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
 	err = schedule_erase(ubi, e, torture);
 	if (err) {
 		spin_lock(&ubi->wl_lock);
-		used_tree_add(ubi, e);
+		wl_tree_add(e, &ubi->used);
 		spin_unlock(&ubi->wl_lock);
 	}
 
@@ -1268,12 +1224,13 @@ retry:
 		goto retry;
 	}
 
-	if (in_wl_tree(e, &ubi->used))
-		used_tree_del(ubi, e);
-	else
+	if (in_wl_tree(e, &ubi->used)) {
+		paranoid_check_in_wl_tree(e, &ubi->used);
+		rb_erase(&e->rb, &ubi->used);
+	} else
 		prot_tree_del(ubi, pnum);
 
-	scrub_tree_add(ubi, e);
+	wl_tree_add(e, &ubi->scrub);
 	spin_unlock(&ubi->wl_lock);
 
 	/*
@@ -1489,7 +1446,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		e->pnum = seb->pnum;
 		e->ec = seb->ec;
 		ubi_assert(e->ec >= 0);
-		free_tree_add(ubi, e);
+		wl_tree_add(e, &ubi->free);
 		ubi->lookuptbl[e->pnum] = e;
 	}
 
@@ -1523,16 +1480,16 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 			if (!seb->scrub) {
 				dbg_wl("add PEB %d EC %d to the used tree",
 				       e->pnum, e->ec);
-				used_tree_add(ubi, e);
+				wl_tree_add(e, &ubi->used);
 			} else {
 				dbg_wl("add PEB %d EC %d to the scrub tree",
 				       e->pnum, e->ec);
-				scrub_tree_add(ubi, e);
+				wl_tree_add(e, &ubi->scrub);
 			}
 		}
 	}
 
-	if (WL_RESERVED_PEBS > ubi->avail_pebs) {
+	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
 		ubi_err("no enough physical eraseblocks (%d, need %d)",
 			ubi->avail_pebs, WL_RESERVED_PEBS);
 		goto out_free;