author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-05-24 04:58:58 -0400
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-06-02 06:53:35 -0400
commit     87960c0b12d0c5a0b37e0c79aef77aa1a0b10d44
tree       1ca72a382460891273e273064624bd92f8fc3d9a /drivers/mtd/ubi/wl.c
parent     90bf0265e5b0d561f215a69bb7a46c4071b2c93b
UBI: fix and clean-up error paths in WL worker
This patch fixes the error path in the WL worker - in some cases
UBI oopses when 'goto out_error' happens and e1 or e2 are NULL.
It also cleans up the error paths a little, and I have tested
nearly all of the error paths in the WL worker.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
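
The oops described above is easiest to see in isolation: in the pre-patch code,
several cancel paths set e1 = NULL (or e2 = NULL) and then jumped to out_error,
which logs e1->pnum and e2->pnum before the later if (e1) / if (e2) checks (see
the removed lines in the diff below). The following stand-alone program is a
minimal sketch of that failure pattern only; it is not UBI code, and the names
e1/pnum are borrowed from the diff purely for illustration.

#include <stdio.h>
#include <stdlib.h>

struct entry { int pnum; };	/* toy stand-in for struct ubi_wl_entry */

/* Minimal reproduction of the bug pattern: the error label logs through
 * the pointer before the NULL check, so any path that cleared the pointer
 * and then jumped to the label dereferences NULL and crashes. */
static int move_block(int fail_after_clear)
{
	struct entry *e1 = malloc(sizeof(*e1));
	int err;

	if (!e1)
		return -1;
	e1->pnum = 42;

	if (fail_after_clear) {
		free(e1);
		e1 = NULL;		/* ownership handed off elsewhere ... */
		err = -5;
		goto out_error;		/* ... but the error label still uses e1 */
	}

	free(e1);
	return 0;

out_error:
	/* Crashes here when e1 == NULL, just like the old out_error path. */
	printf("error %d while moving PEB %d\n", err, e1->pnum);
	if (e1)
		free(e1);
	return err;
}

int main(void)
{
	return move_block(1) ? 1 : 0;
}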
Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--  drivers/mtd/ubi/wl.c | 100
1 file changed, 46 insertions(+), 54 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ec915c02301c..793882ba2a6e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -653,7 +653,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, scrubbing = 0, torture = 0;
+	int err, scrubbing = 0, torture = 0, protect = 0;
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
@@ -738,64 +738,52 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			/*
 			 * We are trying to move PEB without a VID header. UBI
 			 * always write VID headers shortly after the PEB was
-			 * given, so we have a situation when it did not have
-			 * chance to write it down because it was preempted.
-			 * Just re-schedule the work, so that next time it will
-			 * likely have the VID header in place.
+			 * given, so we have a situation when it has not yet
+			 * had a chance to write it, because it was preempted.
+			 * So add this PEB to the protection queue so far,
+			 * because presubably more data will be written there
+			 * (including the missin VID header), and then we'll
+			 * move it.
 			 */
 			dbg_wl("PEB %d has no VID header", e1->pnum);
+			protect = 1;
 			goto out_not_moved;
 		}
 
 		ubi_err("error %d while reading VID header from PEB %d",
 			err, e1->pnum);
-		if (err > 0)
-			err = -EIO;
 		goto out_error;
 	}
 
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
+		if (err == MOVE_CANCEL_RACE) {
+			/*
+			 * The LEB has not been moved because the volume is
+			 * being deleted or the PEB has been put meanwhile. We
+			 * should prevent this PEB from being selected for
+			 * wear-leveling movement again, so put it to the
+			 * protection queue.
+			 */
+			protect = 1;
+			goto out_not_moved;
+		}
+
 		if (err == MOVE_CANCEL_BITFLIPS ||
 		    err == MOVE_TARGET_WR_ERR) {
 			/* Target PEB bit-flips or write error, torture it */
 			torture = 1;
 			goto out_not_moved;
 		}
+
 		if (err < 0)
 			goto out_error;
 
-		/*
-		 * The LEB has not been moved because the volume is being
-		 * deleted or the PEB has been put meanwhile. We should prevent
-		 * this PEB from being selected for wear-leveling movement
-		 * again, so put it to the protection queue.
-		 */
-
-		dbg_wl("canceled moving PEB %d", e1->pnum);
-		ubi_assert(err == MOVE_CANCEL_RACE);
-
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		vid_hdr = NULL;
-
-		spin_lock(&ubi->wl_lock);
-		prot_queue_add(ubi, e1);
-		ubi_assert(!ubi->move_to_put);
-		ubi->move_from = ubi->move_to = NULL;
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-
-		e1 = NULL;
-		err = schedule_erase(ubi, e2, 0);
-		if (err)
-			goto out_error;
-		mutex_unlock(&ubi->move_mutex);
-		return 0;
+		ubi_assert(0);
 	}
 
 	/* The PEB has been successfully moved */
 	ubi_free_vid_hdr(ubi, vid_hdr);
-	vid_hdr = NULL;
 	if (scrubbing)
 		ubi_msg("scrubbed PEB %d, data moved to PEB %d",
 			e1->pnum, e2->pnum);
@@ -811,8 +799,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	err = schedule_erase(ubi, e1, 0);
 	if (err) {
-		e1 = NULL;
-		goto out_error;
+		kmem_cache_free(ubi_wl_entry_slab, e1);
+		kmem_cache_free(ubi_wl_entry_slab, e2);
+		goto out_ro;
 	}
 
 	if (e2) {
@@ -822,8 +811,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
-		if (err)
-			goto out_error;
+		if (err) {
+			kmem_cache_free(ubi_wl_entry_slab, e2);
+			goto out_ro;
+		}
 	}
 
 	dbg_wl("done");
@@ -836,11 +827,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * have been changed, schedule it for erasure.
 	 */
 out_not_moved:
-	dbg_wl("canceled moving PEB %d", e1->pnum);
-	ubi_free_vid_hdr(ubi, vid_hdr);
-	vid_hdr = NULL;
+	dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+	       e1->pnum, e2->pnum, err);
 	spin_lock(&ubi->wl_lock);
-	if (scrubbing)
+	if (protect)
+		prot_queue_add(ubi, e1);
+	else if (scrubbing)
 		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
@@ -849,32 +841,32 @@ out_not_moved:
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	e1 = NULL;
+	ubi_free_vid_hdr(ubi, vid_hdr);
 	err = schedule_erase(ubi, e2, torture);
-	if (err)
-		goto out_error;
-
+	if (err) {
+		kmem_cache_free(ubi_wl_entry_slab, e2);
+		goto out_ro;
+	}
 	mutex_unlock(&ubi->move_mutex);
 	return 0;
 
 out_error:
 	ubi_err("error %d while moving PEB %d to PEB %d",
 		err, e1->pnum, e2->pnum);
-
-	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	if (e1)
-		kmem_cache_free(ubi_wl_entry_slab, e1);
-	if (e2)
-		kmem_cache_free(ubi_wl_entry_slab, e2);
-	ubi_ro_mode(ubi);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	kmem_cache_free(ubi_wl_entry_slab, e1);
+	kmem_cache_free(ubi_wl_entry_slab, e2);
 
+out_ro:
+	ubi_ro_mode(ubi);
 	mutex_unlock(&ubi->move_mutex);
-	return err;
+	ubi_assert(err != 0);
+	return err < 0 ? err : -EIO;
 
 out_cancel:
 	ubi->wl_scheduled = 0;
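
For readers who do not want to mentally apply the hunks, here is a condensed,
self-contained sketch of the control flow the patch ends up with: out_not_moved
re-files the source PEB (protection queue, scrub tree or used tree), a failed
erase scheduling falls through to the new out_ro label, and out_ro is the
single place that switches the device to read-only mode. Everything below is a
simplification with stubbed helpers; only the label names and the
protect/scrubbing dispatch come from the diff above.

#include <stdio.h>

struct wl_entry { int pnum; };	/* toy stand-in for struct ubi_wl_entry */

/* Stubs standing in for the real UBI helpers used in the diff. */
static int  schedule_erase(struct wl_entry *e, int torture) { (void)torture; printf("erase PEB %d\n", e->pnum); return 0; }
static void prot_queue_add(struct wl_entry *e) { printf("protect PEB %d\n", e->pnum); }
static void add_to_scrub(struct wl_entry *e)   { printf("scrub PEB %d\n", e->pnum); }
static void add_to_used(struct wl_entry *e)    { printf("used PEB %d\n", e->pnum); }
static void free_entry(struct wl_entry *e)     { (void)e; }
static void ro_mode(void)                      { printf("switching to read-only mode\n"); }

/* Sketch of the post-patch exit paths of wear_leveling_worker(). */
static int wl_worker_sketch(struct wl_entry *e1, struct wl_entry *e2,
			    int copy_failed, int protect, int scrubbing)
{
	int err;

	if (copy_failed)
		goto out_not_moved;

	/* Success path (LEB copied, e1 erased, e2 holds the data) elided. */
	return 0;

out_not_moved:
	/* Re-file the source PEB instead of leaving dangling state behind. */
	if (protect)
		prot_queue_add(e1);
	else if (scrubbing)
		add_to_scrub(e1);
	else
		add_to_used(e1);

	err = schedule_erase(e2, 0);
	if (err) {
		free_entry(e2);
		goto out_ro;
	}
	return 0;

out_ro:
	/* Single fatal exit: switch to read-only mode, always return an error. */
	ro_mode();
	return err < 0 ? err : -5;	/* -5 stands in for -EIO */
}

int main(void)
{
	struct wl_entry a = { .pnum = 1 }, b = { .pnum = 2 };

	/* Example: copy canceled because the PEB had no VID header yet -> protect it. */
	return wl_worker_sketch(&a, &b, /*copy_failed*/1, /*protect*/1, /*scrubbing*/0) ? 1 : 0;
}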