author:    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-05-24 04:58:58 -0400
committer: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2009-06-02 06:53:35 -0400
commit:    87960c0b12d0c5a0b37e0c79aef77aa1a0b10d44
tree:      1ca72a382460891273e273064624bd92f8fc3d9a /drivers/mtd/ubi
parent:    90bf0265e5b0d561f215a69bb7a46c4071b2c93b
UBI: fix and clean-up error paths in WL worker
This patch fixes the error path in the WL worker - in some cases UBI oopses
when 'goto out_error' happens and e1 or e2 are NULL. This patch also cleans up
the error paths a little. And I have tested nearly all error paths in the WL
worker.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
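[Editor's note, not part of the commit message] The oops described above comes from error paths that clear e1 (or e2) and then jump to out_error, which still dereferences e1->pnum and e2->pnum. A minimal stand-alone sketch of that pattern, using hypothetical names rather than the actual kernel code:

#include <stdio.h>

/* Hypothetical stand-in for struct ubi_wl_entry; only pnum matters here. */
struct wl_entry {
	int pnum;			/* physical eraseblock number */
};

/* Old-style error handling: e1 is cleared, yet the error label still uses it. */
static int move_peb_buggy(struct wl_entry *e1, struct wl_entry *e2, int err)
{
	if (err) {
		e1 = NULL;		/* ownership handed off elsewhere ...  */
		goto out_error;		/* ... but out_error dereferences e1   */
	}
	return 0;

out_error:
	/* NULL-pointer oops whenever e1 (or e2) was cleared above */
	fprintf(stderr, "error %d while moving PEB %d to PEB %d\n",
		err, e1->pnum, e2->pnum);
	return err;
}

int main(void)
{
	struct wl_entry a = { .pnum = 1 }, b = { .pnum = 2 };

	/*
	 * Take the success path; calling with a non-zero err would crash.
	 * The patch below avoids this by freeing the entries at the failing
	 * call site and jumping to a new out_ro label that never touches
	 * e1 or e2.
	 */
	return move_peb_buggy(&a, &b, 0);
}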
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--  drivers/mtd/ubi/eba.c  |  14
-rw-r--r--  drivers/mtd/ubi/wl.c   | 100
2 files changed, 53 insertions(+), 61 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 7ab79e247245..587b6cb5040f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -963,7 +963,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	vol_id = be32_to_cpu(vid_hdr->vol_id);
 	lnum = be32_to_cpu(vid_hdr->lnum);
 
-	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
 
 	if (vid_hdr->vol_type == UBI_VID_STATIC) {
 		data_size = be32_to_cpu(vid_hdr->data_size);
@@ -984,7 +984,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	spin_unlock(&ubi->volumes_lock);
 	if (!vol) {
 		/* No need to do further work, cancel */
-		dbg_eba("volume %d is being removed, cancel", vol_id);
+		dbg_wl("volume %d is being removed, cancel", vol_id);
 		return MOVE_CANCEL_RACE;
 	}
 
@@ -1003,7 +1003,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	 */
 	err = leb_write_trylock(ubi, vol_id, lnum);
 	if (err) {
-		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
+		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
 		return MOVE_CANCEL_RACE;
 	}
 
@@ -1013,9 +1013,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	 * cancel it.
 	 */
 	if (vol->eba_tbl[lnum] != from) {
-		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
-			"PEB %d, cancel", vol_id, lnum, from,
-			vol->eba_tbl[lnum]);
+		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+		       "PEB %d, cancel", vol_id, lnum, from,
+		       vol->eba_tbl[lnum]);
 		err = MOVE_CANCEL_RACE;
 		goto out_unlock_leb;
 	}
@@ -1027,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
 	 * @ubi->buf_mutex.
 	 */
	mutex_lock(&ubi->buf_mutex);
-	dbg_eba("read %d bytes of data", aldata_size);
+	dbg_wl("read %d bytes of data", aldata_size);
 	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
 	if (err && err != UBI_IO_BITFLIPS) {
 		ubi_warn("error %d while reading data from PEB %d",
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ec915c02301c..793882ba2a6e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -653,7 +653,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 				int cancel)
 {
-	int err, scrubbing = 0, torture = 0;
+	int err, scrubbing = 0, torture = 0, protect = 0;
 	struct ubi_wl_entry *e1, *e2;
 	struct ubi_vid_hdr *vid_hdr;
 
@@ -738,64 +738,52 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 			/*
 			 * We are trying to move PEB without a VID header. UBI
 			 * always write VID headers shortly after the PEB was
-			 * given, so we have a situation when it did not have
-			 * chance to write it down because it was preempted.
-			 * Just re-schedule the work, so that next time it will
-			 * likely have the VID header in place.
+			 * given, so we have a situation when it has not yet
+			 * had a chance to write it, because it was preempted.
+			 * So add this PEB to the protection queue so far,
+			 * because presumably more data will be written there
+			 * (including the missing VID header), and then we'll
+			 * move it.
 			 */
 			dbg_wl("PEB %d has no VID header", e1->pnum);
+			protect = 1;
 			goto out_not_moved;
 		}
 
 		ubi_err("error %d while reading VID header from PEB %d",
 			err, e1->pnum);
-		if (err > 0)
-			err = -EIO;
 		goto out_error;
 	}
 
 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
 	if (err) {
+		if (err == MOVE_CANCEL_RACE) {
+			/*
+			 * The LEB has not been moved because the volume is
+			 * being deleted or the PEB has been put meanwhile. We
+			 * should prevent this PEB from being selected for
+			 * wear-leveling movement again, so put it to the
+			 * protection queue.
+			 */
+			protect = 1;
+			goto out_not_moved;
+		}
+
 		if (err == MOVE_CANCEL_BITFLIPS ||
 		    err == MOVE_TARGET_WR_ERR) {
 			/* Target PEB bit-flips or write error, torture it */
 			torture = 1;
 			goto out_not_moved;
 		}
+
 		if (err < 0)
 			goto out_error;
 
-		/*
-		 * The LEB has not been moved because the volume is being
-		 * deleted or the PEB has been put meanwhile. We should prevent
-		 * this PEB from being selected for wear-leveling movement
-		 * again, so put it to the protection queue.
-		 */
-
-		dbg_wl("canceled moving PEB %d", e1->pnum);
-		ubi_assert(err == MOVE_CANCEL_RACE);
-
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		vid_hdr = NULL;
-
-		spin_lock(&ubi->wl_lock);
-		prot_queue_add(ubi, e1);
-		ubi_assert(!ubi->move_to_put);
-		ubi->move_from = ubi->move_to = NULL;
-		ubi->wl_scheduled = 0;
-		spin_unlock(&ubi->wl_lock);
-
-		e1 = NULL;
-		err = schedule_erase(ubi, e2, 0);
-		if (err)
-			goto out_error;
-		mutex_unlock(&ubi->move_mutex);
-		return 0;
+		ubi_assert(0);
 	}
 
 	/* The PEB has been successfully moved */
 	ubi_free_vid_hdr(ubi, vid_hdr);
-	vid_hdr = NULL;
 	if (scrubbing)
 		ubi_msg("scrubbed PEB %d, data moved to PEB %d",
 			e1->pnum, e2->pnum);
@@ -811,8 +799,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 	err = schedule_erase(ubi, e1, 0);
 	if (err) {
-		e1 = NULL;
-		goto out_error;
+		kmem_cache_free(ubi_wl_entry_slab, e1);
+		kmem_cache_free(ubi_wl_entry_slab, e2);
+		goto out_ro;
 	}
 
 	if (e2) {
@@ -822,8 +811,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 		 */
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
-		if (err)
-			goto out_error;
+		if (err) {
+			kmem_cache_free(ubi_wl_entry_slab, e2);
+			goto out_ro;
+		}
 	}
 
 	dbg_wl("done");
@@ -836,11 +827,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	 * have been changed, schedule it for erasure.
 	 */
 out_not_moved:
-	dbg_wl("canceled moving PEB %d", e1->pnum);
-	ubi_free_vid_hdr(ubi, vid_hdr);
-	vid_hdr = NULL;
+	dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+	       e1->pnum, e2->pnum, err);
 	spin_lock(&ubi->wl_lock);
-	if (scrubbing)
+	if (protect)
+		prot_queue_add(ubi, e1);
+	else if (scrubbing)
 		wl_tree_add(e1, &ubi->scrub);
 	else
 		wl_tree_add(e1, &ubi->used);
@@ -849,32 +841,32 @@ out_not_moved:
 	ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	e1 = NULL;
+	ubi_free_vid_hdr(ubi, vid_hdr);
 	err = schedule_erase(ubi, e2, torture);
-	if (err)
-		goto out_error;
-
+	if (err) {
+		kmem_cache_free(ubi_wl_entry_slab, e2);
+		goto out_ro;
+	}
 	mutex_unlock(&ubi->move_mutex);
 	return 0;
 
 out_error:
 	ubi_err("error %d while moving PEB %d to PEB %d",
 		err, e1->pnum, e2->pnum);
-
-	ubi_free_vid_hdr(ubi, vid_hdr);
 	spin_lock(&ubi->wl_lock);
 	ubi->move_from = ubi->move_to = NULL;
 	ubi->move_to_put = ubi->wl_scheduled = 0;
 	spin_unlock(&ubi->wl_lock);
 
-	if (e1)
-		kmem_cache_free(ubi_wl_entry_slab, e1);
-	if (e2)
-		kmem_cache_free(ubi_wl_entry_slab, e2);
-	ubi_ro_mode(ubi);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	kmem_cache_free(ubi_wl_entry_slab, e1);
+	kmem_cache_free(ubi_wl_entry_slab, e2);
 
+out_ro:
+	ubi_ro_mode(ubi);
 	mutex_unlock(&ubi->move_mutex);
-	return err;
+	ubi_assert(err != 0);
+	return err < 0 ? err : -EIO;
 
 out_cancel:
 	ubi->wl_scheduled = 0;