author     Xiaochuan-Xu <xiaochuan-xu@cqu.edu.cn>          2008-12-09 06:44:12 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2008-12-15 12:34:50 -0500
commit     23553b2c08c9b6e96be98c44feb9c5e640d3e789 (patch)
tree       222756174c6f0194b9f34b7448ba3976ed0032cf /drivers/mtd
parent     ad5942bad6addcf9697a74413b517d9724d803a4 (diff)
UBI: prepare for protection tree improvements
This patch modifies struct ubi_wl_entry and adds a union which
so far contains only one element. This is just preparation for
further changes which will kill the protection tree and make
UBI use a list instead.
Signed-off-by: Xiaochuan-Xu <xiaochuan-xu@cqu.edu.cn>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
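
[Editor's note] In code terms, the change is the following (the commented-out
list member is an assumption based on the commit message's stated direction,
not part of this patch):

	/* Requires <linux/rbtree.h> for struct rb_node. */
	struct ubi_wl_entry {
		union {
			struct rb_node rb;
			/* The follow-up changes that replace the protection
			 * tree with a list could add the list link here,
			 * e.g. "struct list_head list;" -- hypothetical,
			 * not in this patch. */
		} u;
		int ec;
		int pnum;
	};

Since the union has a single member, the structure's size and layout are
unchanged; only the accessor spelling moves from e->rb to e->u.rb, which is
exactly what the wl.c hunks below do.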
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/ubi.h |  6
-rw-r--r--  drivers/mtd/ubi/wl.c  | 45
2 files changed, 27 insertions, 24 deletions
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 1c3fa18c26a7..46a4763f8e7c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -95,7 +95,7 @@ enum {
 
 /**
  * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
+ * @u.rb: link in the corresponding (free/used) RB-tree
  * @ec: erase counter
  * @pnum: physical eraseblock number
  *
@@ -104,7 +104,9 @@ enum {
  * RB-trees. See WL sub-system for details.
  */
 struct ubi_wl_entry {
-	struct rb_node rb;
+	union {
+		struct rb_node rb;
+	} u;
 	int ec;
 	int pnum;
 };
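
[Editor's note] The wl.c edits that follow are mechanical because rb_entry()
is container_of(), and the member argument of container_of()/offsetof() may be
a nested designator such as u.rb. A minimal user-space sketch of the same
pattern (names here are illustrative, not UBI's):

	#include <stddef.h>
	#include <stdio.h>

	struct node { int color; };	/* stand-in for struct rb_node */

	struct entry {
		union {
			struct node rb;
		} u;
		int ec;
	};

	/* Same idea as the kernel's rb_entry(ptr, type, member). */
	#define entry_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct entry e = { .ec = 42 };
		struct node *n = &e.u.rb;

		/* "u.rb" is a valid offsetof() member designator, so the
		 * container is recovered correctly through the union. */
		printf("ec = %d\n", entry_of(n, struct entry, u.rb)->ec);
		return 0;
	}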
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index abf65ea414e7..0279bf9dc722 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -220,7 +220,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
 		struct ubi_wl_entry *e1;
 
 		parent = *p;
-		e1 = rb_entry(parent, struct ubi_wl_entry, rb);
+		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
 
 		if (e->ec < e1->ec)
 			p = &(*p)->rb_left;
@@ -235,8 +235,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
 		}
 	}
 
-	rb_link_node(&e->rb, parent, p);
-	rb_insert_color(&e->rb, root);
+	rb_link_node(&e->u.rb, parent, p);
+	rb_insert_color(&e->u.rb, root);
 }
 
 /**
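
[Editor's note] Context for the wl_tree_add() hunks: the free, used, and scrub
trees are keyed primarily by erase counter (with a tie-break on pnum in the
lines elided between the two hunks), so rb_first() always yields the
least-worn entry and rb_last() the most-worn. As a plain comparator, the
ordering is roughly this (a sketch; the kernel open-codes it while walking
rb_node pointers):

	static int wl_entry_cmp(const struct ubi_wl_entry *a,
				const struct ubi_wl_entry *b)
	{
		if (a->ec != b->ec)
			return a->ec < b->ec ? -1 : 1;
		return a->pnum < b->pnum ? -1 : 1; /* equal pnums cannot occur */
	}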
@@ -331,7 +331,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
 	while (p) {
 		struct ubi_wl_entry *e1;
 
-		e1 = rb_entry(p, struct ubi_wl_entry, rb);
+		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
 
 		if (e->pnum == e1->pnum) {
 			ubi_assert(e == e1);
@@ -413,14 +413,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
 	struct rb_node *p;
 	struct ubi_wl_entry *e;
 
-	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
+	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
 	max += e->ec;
 
 	p = root->rb_node;
 	while (p) {
 		struct ubi_wl_entry *e1;
 
-		e1 = rb_entry(p, struct ubi_wl_entry, rb);
+		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
 		if (e1->ec >= max)
 			p = p->rb_left;
 		else {
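
[Editor's note] The search contract of find_wl_entry(root, max): it first
rebases max against the lowest erase counter in the tree (max += e->ec), then
returns the entry with the greatest erase counter still below that bound,
descending right and remembering the candidate whenever e1->ec < max.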
@@ -491,12 +491,13 @@ retry:
 		 * eraseblock with erase counter greater or equivalent than the
 		 * lowest erase counter plus %WL_FREE_MAX_DIFF.
 		 */
-		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
-		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
+		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
+				 u.rb);
+		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
 
 		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
 			e = rb_entry(ubi->free.rb_node,
-					struct ubi_wl_entry, rb);
+					struct ubi_wl_entry, u.rb);
 		else {
 			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
 			e = find_wl_entry(&ubi->free, medium_ec);
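
[Editor's note] A worked example for this long-term branch, assuming the
default UBI_WL_THRESHOLD of 4096 so that WL_FREE_MAX_DIFF is 8192 (both values
come from wl.c and the UBI Kconfig, not from this patch): with first->ec = 1000
and last->ec = 12000, the spread exceeds 8192, so medium_ec = (1000 + 8192)/2
= 4596, and find_wl_entry() returns the free eraseblock with the highest
counter below 1000 + 4596 = 5596, i.e. a medium-worn block suited for
long-term data.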
@@ -508,7 +509,7 @@ retry:
 		 * For short term data we pick a physical eraseblock with the
 		 * lowest erase counter as we expect it will be erased soon.
 		 */
-		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
+		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
 		protect = ST_PROTECTION;
 		break;
 	default:
@@ -522,7 +523,7 @@ retry:
 	 * be protected from being moved for some time.
 	 */
 	paranoid_check_in_wl_tree(e, &ubi->free);
-	rb_erase(&e->rb, &ubi->free);
+	rb_erase(&e->u.rb, &ubi->free);
 	prot_tree_add(ubi, e, pe, protect);
 
 	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
@@ -779,7 +780,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 * highly worn-out free physical eraseblock. If the erase
 		 * counters differ much enough, start wear-leveling.
 		 */
-		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -788,21 +789,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			goto out_cancel;
 		}
 		paranoid_check_in_wl_tree(e1, &ubi->used);
-		rb_erase(&e1->rb, &ubi->used);
+		rb_erase(&e1->u.rb, &ubi->used);
 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
 	} else {
 		/* Perform scrubbing */
 		scrubbing = 1;
-		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
+		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
-		rb_erase(&e1->rb, &ubi->scrub);
+		rb_erase(&e1->u.rb, &ubi->scrub);
 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
 	}
 
 	paranoid_check_in_wl_tree(e2, &ubi->free);
-	rb_erase(&e2->rb, &ubi->free);
+	rb_erase(&e2->u.rb, &ubi->free);
 	ubi->move_from = e1;
 	ubi->move_to = e2;
 	spin_unlock(&ubi->wl_lock);
@@ -1012,7 +1013,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
 	 * erase counter of free physical eraseblocks is greater then
 	 * %UBI_WL_THRESHOLD.
 	 */
-	e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+	e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
 	e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
 
 	if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
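
[Editor's note] In both wear_leveling_worker() above and ensure_wear_leveling()
here, the trigger condition is the same: data is moved only when a highly worn
free eraseblock (e2, via find_wl_entry()) exceeds the least-worn used
eraseblock (e1, via rb_first()) by at least UBI_WL_THRESHOLD erase cycles;
otherwise wear-leveling is not needed yet.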
@@ -1214,10 +1215,10 @@ retry:
 	} else {
 		if (in_wl_tree(e, &ubi->used)) {
 			paranoid_check_in_wl_tree(e, &ubi->used);
-			rb_erase(&e->rb, &ubi->used);
+			rb_erase(&e->u.rb, &ubi->used);
 		} else if (in_wl_tree(e, &ubi->scrub)) {
 			paranoid_check_in_wl_tree(e, &ubi->scrub);
-			rb_erase(&e->rb, &ubi->scrub);
+			rb_erase(&e->u.rb, &ubi->scrub);
 		} else {
 			err = prot_tree_del(ubi, e->pnum);
 			if (err) {
@@ -1279,7 +1280,7 @@ retry:
 
 	if (in_wl_tree(e, &ubi->used)) {
 		paranoid_check_in_wl_tree(e, &ubi->used);
-		rb_erase(&e->rb, &ubi->used);
+		rb_erase(&e->u.rb, &ubi->used);
 	} else {
 		int err;
 
@@ -1361,11 +1362,11 @@ static void tree_destroy(struct rb_root *root)
 		else if (rb->rb_right)
 			rb = rb->rb_right;
 		else {
-			e = rb_entry(rb, struct ubi_wl_entry, rb);
+			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
 
 			rb = rb_parent(rb);
 			if (rb) {
-				if (rb->rb_left == &e->rb)
+				if (rb->rb_left == &e->u.rb)
 					rb->rb_left = NULL;
 				else
 					rb->rb_right = NULL;
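
[Editor's note] A closing note on tree_destroy(): it tears the tree down
iteratively in post-order, freeing each bottom-most node and then nulling the
parent's corresponding child pointer, which avoids both recursion depth and
the rebalancing work rb_erase() would do. The comparison
rb->rb_left == &e->u.rb is why the member rename reaches even this teardown
path.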