author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2008-12-05 05:23:48 -0500
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2008-12-05 06:46:49 -0500
commit     6a8f483f33a150a0269ad4612621eb6c245eb2cf
tree       c54f9bf615c01b7b623c9b6f95104c06de887d6d /drivers/mtd
parent     4df581f3dc6a91a63b9965ac8bdb47d8db294e37
UBI: some code re-structuring
Minor code re-structuring and comment fixes to improve readability.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Diffstat (limited to 'drivers/mtd')
 -rw-r--r--  drivers/mtd/ubi/wl.c | 39 +++++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index dcb6dac1dc54..667f5f451c2b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -359,19 +359,18 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
  * @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- * to be removed from the protection trees.
+ * @ec: for how many erase operations this PEB should be protected
  *
  * @wl->lock has to be locked.
  */
 static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
-                          struct ubi_wl_prot_entry *pe, int abs_ec)
+                          struct ubi_wl_prot_entry *pe, int ec)
 {
         struct rb_node **p, *parent = NULL;
         struct ubi_wl_prot_entry *pe1;

         pe->e = e;
-        pe->abs_ec = ubi->abs_ec + abs_ec;
+        pe->abs_ec = ubi->abs_ec + ec;

         p = &ubi->prot.pnum.rb_node;
         while (*p) {
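The renamed @ec parameter makes the protection rule easy to state: an entry added with prot_tree_add() stays protected until the device-wide absolute erase counter has advanced by @ec more erase operations. A minimal sketch of the resulting expiry test, assuming the ubi_device and ubi_wl_prot_entry definitions from this file (prot_expired() is a hypothetical helper, not a function in wl.c):

/*
 * Hypothetical helper, for illustration only: prot_tree_add() stored
 * pe->abs_ec = ubi->abs_ec + ec, so the entry expires as soon as the
 * device-wide erase counter catches up with that threshold.
 */
static int prot_expired(const struct ubi_device *ubi,
                        const struct ubi_wl_prot_entry *pe)
{
        return pe->abs_ec <= ubi->abs_ec;
}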
@@ -739,7 +738,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                 int cancel)
 {
-        int err, put = 0, scrubbing = 0, protect = 0;
+        int err, put = 0, scrubbing = 0;
         struct ubi_wl_prot_entry *uninitialized_var(pe);
         struct ubi_wl_entry *e1, *e2;
         struct ubi_vid_hdr *vid_hdr;
@@ -864,17 +863,28 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                         goto out_error;
                 }

-                protect = 1;
+                ubi_free_vid_hdr(ubi, vid_hdr);
+                spin_lock(&ubi->wl_lock);
+                prot_tree_add(ubi, e1, pe, U_PROTECTION);
+                ubi_assert(!ubi->move_to_put);
+                ubi->move_from = ubi->move_to = NULL;
+                ubi->wl_scheduled = 0;
+                spin_unlock(&ubi->wl_lock);
+
+                err = schedule_erase(ubi, e2, 0);
+                if (err)
+                        goto out_error;
+                mutex_unlock(&ubi->move_mutex);
+                return 0;
         }

+        /* The PEB has been successfully moved */
         ubi_free_vid_hdr(ubi, vid_hdr);
-        if (scrubbing && !protect)
+        if (scrubbing)
                 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
                         e1->pnum, e2->pnum);

         spin_lock(&ubi->wl_lock);
-        if (protect)
-                prot_tree_add(ubi, e1, pe, protect);
         if (!ubi->move_to_put)
                 wl_tree_add(e2, &ubi->used);
         else
@@ -883,6 +893,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
         ubi->move_to_put = ubi->wl_scheduled = 0;
         spin_unlock(&ubi->wl_lock);

+        err = schedule_erase(ubi, e1, 0);
+        if (err)
+                goto out_error;
+
         if (put) {
                 /*
                  * Well, the target PEB was put meanwhile, schedule it for
@@ -894,13 +908,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                 goto out_error;
         }

-        if (!protect) {
-                err = schedule_erase(ubi, e1, 0);
-                if (err)
-                        goto out_error;
-        }
-
-
         dbg_wl("done");
         mutex_unlock(&ubi->move_mutex);
         return 0;
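Taken together, the patch splits wear_leveling_worker()'s exits into two straight-line success paths: the bitflips-on-target branch now protects e1, resets the move state, erases the failed target e2 and returns early, while the normal path schedules e1 for erasure immediately after dropping wl_lock instead of going through the old protect flag. A condensed paraphrase of the two paths, assuming the surrounding wl.c definitions; the function name and the bitflips_on_target flag are hypothetical stand-ins (the real function derives this condition from the result of the LEB copy), and error paths are elided:

/* Hypothetical condensation of the two success paths above. */
static int wl_success_paths_sketch(struct ubi_device *ubi,
                                   struct ubi_wl_entry *e1,
                                   struct ubi_wl_entry *e2,
                                   struct ubi_wl_prot_entry *pe,
                                   struct ubi_vid_hdr *vid_hdr,
                                   int bitflips_on_target)
{
        ubi_free_vid_hdr(ubi, vid_hdr);

        if (bitflips_on_target) {
                /* Early return: keep e1 protected, erase the failed target */
                spin_lock(&ubi->wl_lock);
                prot_tree_add(ubi, e1, pe, U_PROTECTION);
                ubi->move_from = ubi->move_to = NULL;
                ubi->wl_scheduled = 0;
                spin_unlock(&ubi->wl_lock);
                return schedule_erase(ubi, e2, 0);
        }

        /* Normal path: e2 now holds the data, e1 is erased right away */
        spin_lock(&ubi->wl_lock);
        if (!ubi->move_to_put)
                wl_tree_add(e2, &ubi->used);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        return schedule_erase(ubi, e1, 0);
}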