author		Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2008-12-05 05:42:45 -0500
committer	Artem Bityutskiy <Artem.Bityutskiy@nokia.com>	2008-12-05 06:46:50 -0500
commit		3c98b0a043f25fa44b289c2f35b9d6ad1d859ac9
tree		1d30e363da70c1aa051f550d25adf5f12bff9779 /drivers/mtd
parent		6a8f483f33a150a0269ad4612621eb6c245eb2cf
UBI: fix error path
Make sure the resources have not already been freed before freeing them in the error path of the WL worker function.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
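In short, the worker now tracks ownership through its local pointers: a pointer is set to NULL as soon as the object it names has been freed or handed off, and the shared out_error path frees only what is still non-NULL. Below is a minimal sketch of that discipline; all names in it (struct entry, hand_off, worker) are illustrative stand-ins, not the real wl.c functions.

	#include <stdlib.h>

	struct entry { int pnum; };

	/*
	 * Hypothetical consumer standing in for schedule_erase(): on
	 * success it takes ownership of e (and frees it here); on
	 * failure the caller still owns e.
	 */
	static int hand_off(struct entry *e)
	{
		if (!e)
			return -1;
		free(e);
		return 0;
	}

	static int worker(struct entry *e1, struct entry *e2)
	{
		int err;

		err = hand_off(e1);
		if (err)
			goto out_error;
		e1 = NULL;	/* ownership transferred, must not free again */

		err = hand_off(e2);
		if (err)
			goto out_error;
		e2 = NULL;

		return 0;

	out_error:
		/*
		 * Free only what this function still owns, mirroring the
		 * if (e1) / if (e2) checks the patch adds before
		 * kmem_cache_free() in wl.c.
		 */
		if (e1)
			free(e1);
		if (e2)
			free(e2);
		return err;
	}

	int main(void)
	{
		struct entry *a = malloc(sizeof(*a));
		struct entry *b = malloc(sizeof(*b));

		return worker(a, b) ? 1 : 0;
	}

The same idea is why the patch drops the put flag in favour of testing e2 directly: the pointer itself now records whether the PEB is still this function's responsibility.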
Diffstat (limited to 'drivers/mtd')
-rw-r--r--	drivers/mtd/ubi/wl.c	27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 667f5f451c2b..442099d76ec9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -738,13 +738,12 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, put = 0, scrubbing = 0;
+	int err, scrubbing = 0;
 	struct ubi_wl_prot_entry *uninitialized_var(pe);
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
 	kfree(wrk);
-
 	if (cancel)
 		return 0;
 
@@ -864,6 +863,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	}
 
 	ubi_free_vid_hdr(ubi, vid_hdr);
+	vid_hdr = NULL;
+
 	spin_lock(&ubi->wl_lock);
 	prot_tree_add(ubi, e1, pe, U_PROTECTION);
 	ubi_assert(!ubi->move_to_put);
@@ -871,6 +872,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
+	e1 = NULL;
 	err = schedule_erase(ubi, e2, 0);
 	if (err)
 		goto out_error;
@@ -880,24 +882,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	/* The PEB has been successfully moved */
 	ubi_free_vid_hdr(ubi, vid_hdr);
+	vid_hdr = NULL;
 	if (scrubbing)
 		ubi_msg("scrubbed PEB %d, data moved to PEB %d",
 			e1->pnum, e2->pnum);
 
 	spin_lock(&ubi->wl_lock);
-	if (!ubi->move_to_put)
+	if (!ubi->move_to_put) {
 		wl_tree_add(e2, &ubi->used);
-	else
-		put = 1;
+		e2 = NULL;
+	}
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
 	err = schedule_erase(ubi, e1, 0);
-	if (err)
+	if (err) {
+		e1 = NULL;
 		goto out_error;
+	}
 
-	if (put) {
+	if (e2) {
 		/*
 		 * Well, the target PEB was put meanwhile, schedule it for
 		 * erasure.
@@ -919,6 +924,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 */
 out_not_moved:
 	ubi_free_vid_hdr(ubi, vid_hdr);
+	vid_hdr = NULL;
 	spin_lock(&ubi->wl_lock);
 	if (scrubbing)
 		wl_tree_add(e1, &ubi->scrub);
@@ -928,6 +934,7 @@ out_not_moved:
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
+	e1 = NULL;
 	err = schedule_erase(ubi, e2, 0);
 	if (err)
 		goto out_error;
@@ -945,8 +952,10 @@ out_error:
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	kmem_cache_free(ubi_wl_entry_slab, e1);
-	kmem_cache_free(ubi_wl_entry_slab, e2);
+	if (e1)
+		kmem_cache_free(ubi_wl_entry_slab, e1);
+	if (e2)
+		kmem_cache_free(ubi_wl_entry_slab, e2);
 	ubi_ro_mode(ubi);
 
 	mutex_unlock(&ubi->move_mutex);