Diffstat (limited to 'drivers/lightnvm/core.c')
-rw-r--r--  drivers/lightnvm/core.c | 309
1 file changed, 251 insertions(+), 58 deletions(-)
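
For orientation, the exported API after this patch reduces to two prototypes, taken directly from the definitions added in the hunks below (this view is limited to core.c, so the matching header change is not shown):

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta);
int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type);

On 2.0 devices, nvm_get_chunk_meta() passes straight through to the driver's get_chk_meta(); on 1.2 devices it is emulated from the bad block table plus a per-chunk scan.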
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 964352720a03..8df188e0767e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -717,46 +717,6 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
 	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 }
 
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
-		       struct ppa_addr ppa, int nchks)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-
-	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
-
-	return dev->ops->get_chk_meta(tgt_dev->parent, meta,
-						(sector_t)ppa.ppa, nchks);
-}
-EXPORT_SYMBOL(nvm_get_chunk_meta);
-
-int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-		       int nr_ppas, int type)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-	struct nvm_rq rqd;
-	int ret;
-
-	if (nr_ppas > NVM_MAX_VLBA) {
-		pr_err("nvm: unable to update all blocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(tgt_dev, &rqd);
-	if (ret) {
-		pr_err("nvm: failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
-
 static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
 {
 	int flags = 0;
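
Both helpers removed above reappear below under chunk-oriented names. As a hedged illustration of the renamed write side, here is a hypothetical caller that marks a grown bad block; the wrapper itself is not part of the patch, only the nvm_set_chunk_meta() signature is:

/* Hypothetical wrapper, assuming the nvm_set_chunk_meta() added below. */
static int example_mark_chunk_bad(struct nvm_tgt_dev *tgt_dev,
				  struct ppa_addr ppa)
{
	/* No-op on 2.0 devices; updates the bad block table on 1.2. */
	return nvm_set_chunk_meta(tgt_dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
}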
@@ -830,27 +790,159 @@ void nvm_end_io(struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_end_io);
 
+static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!dev->ops->submit_io_sync)
+		return -ENODEV;
+
+	rqd->flags = nvm_set_flags(&dev->geo, rqd);
+
+	return dev->ops->submit_io_sync(dev, rqd);
+}
+
+static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	struct nvm_rq rqd = { NULL };
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct page *page;
+	int ret;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	bio_init(&bio, &bio_vec, 1);
+	bio_add_page(&bio, page, PAGE_SIZE, 0);
+	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
+	rqd.bio = &bio;
+	rqd.opcode = NVM_OP_PREAD;
+	rqd.is_seq = 1;
+	rqd.nr_ppas = 1;
+	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+	ret = nvm_submit_io_sync_raw(dev, &rqd);
+	if (ret)
+		return ret;
+
+	__free_page(page);
+
+	return rqd.error;
+}
+
 /*
- * folds a bad block list from its plane representation to its virtual
- * block representation. The fold is done in place and reduced size is
- * returned.
- *
- * If any of the planes status are bad or grown bad block, the virtual block
- * is marked bad. If not bad, the first plane state acts as the block state.
+ * Scans a 1.2 chunk's first and last page to determine its state.
+ * If the chunk is found to be open, also scan it to update the write
+ * pointer.
  */
-int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
+			     struct nvm_chk_meta *meta)
 {
 	struct nvm_geo *geo = &dev->geo;
-	int blk, offset, pl, blktype;
+	int ret, pg, pl;
 
-	if (nr_blks != geo->num_chk * geo->pln_mode)
-		return -EINVAL;
+	/* sense first page */
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) /* valid data */
+		meta->state = NVM_CHK_ST_OPEN;
+	else if (ret > 0) {
+		/*
+		 * If empty page, the chunk is free, else it is an
+		 * actual io error. In that case, mark it offline.
+		 */
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+			meta->state = NVM_CHK_ST_FREE;
+			return 0;
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			goto scan;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+	/* sense last page */
+	ppa.g.pg = geo->num_pg - 1;
+	ppa.g.pl = geo->num_pln - 1;
+
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) { /* chunk fully written */
+		meta->state = NVM_CHK_ST_CLOSED;
+		meta->wp = geo->clba;
+		return 0;
+	} else if (ret > 0) {
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			break;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+scan:
+	/*
+	 * The chunk is open; scan it sequentially to update the write
+	 * pointer. We make the assumption that targets write data across
+	 * all planes before moving to the next page.
+	 */
+	for (pg = 0; pg < geo->num_pg; pg++) {
+		for (pl = 0; pl < geo->num_pln; pl++) {
+			ppa.g.pg = pg;
+			ppa.g.pl = pl;
+
+			ret = nvm_bb_chunk_sense(dev, ppa);
+			if (ret < 0) /* io error */
+				return ret;
+			else if (ret == 0) {
+				meta->wp += geo->ws_min;
+			} else if (ret > 0) {
+				switch (ret) {
+				case NVM_RSP_ERR_EMPTYPAGE:
+					return 0;
+				case NVM_RSP_ERR_FAILCRC:
+				case NVM_RSP_ERR_FAILECC:
+				case NVM_RSP_WARN_HIGHECC:
+					meta->wp += geo->ws_min;
+					break;
+				default:
+					return -ret; /* other io error */
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Folds a bad block list from its plane representation to its
+ * chunk representation.
+ *
+ * If any of the planes' status is bad or grown bad, the chunk is marked
+ * offline. If not bad, the first plane state acts as the chunk state.
+ */
+static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
+			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	int ret, blk, pl, offset, blktype;
 
 	for (blk = 0; blk < geo->num_chk; blk++) {
 		offset = blk * geo->pln_mode;
 		blktype = blks[offset];
 
-		/* Bad blocks on any planes take precedence over other types */
 		for (pl = 0; pl < geo->pln_mode; pl++) {
 			if (blks[offset + pl] &
 					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
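
The scan above leaves meta->state and meta->wp describing each 1.2 chunk in 2.0 terms. A minimal sketch of how a consumer might act on that; the helper name is hypothetical, but the state and wp fields are exactly the ones populated above:

/* Hypothetical consumer: free chunks restart at wp 0, open chunks resume
 * at the recovered write pointer, anything else is not writable.
 */
static bool example_chunk_is_writable(struct nvm_chk_meta *meta,
				      struct nvm_geo *geo)
{
	return meta->state == NVM_CHK_ST_FREE ||
	       (meta->state == NVM_CHK_ST_OPEN && meta->wp < geo->clba);
}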
@@ -859,23 +951,124 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 			}
 		}
 
-		blks[blk] = blktype;
+		ppa.g.blk = blk;
+
+		meta->wp = 0;
+		meta->type = NVM_CHK_TP_W_SEQ;
+		meta->wi = 0;
+		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
+		meta->cnlb = dev->geo.clba;
+
+		if (blktype == NVM_BLK_T_FREE) {
+			ret = nvm_bb_chunk_scan(dev, ppa, meta);
+			if (ret)
+				return ret;
+		} else {
+			meta->state = NVM_CHK_ST_OFFLINE;
+		}
+
+		meta++;
 	}
 
-	return geo->num_chk;
+	return 0;
+}
+
+static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
+			   int nchks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	struct ppa_addr ppa;
+	u8 *blks;
+	int ch, lun, nr_blks;
+	int ret;
+
+	ppa.ppa = slba;
+	ppa = dev_to_generic_addr(dev, ppa);
+
+	if (ppa.g.blk != 0)
+		return -EINVAL;
+
+	if ((nchks % geo->num_chk) != 0)
+		return -EINVAL;
+
+	nr_blks = geo->num_chk * geo->pln_mode;
+
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
+
+	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
+		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
+			struct ppa_addr ppa_gen, ppa_dev;
+
+			if (!nchks)
+				goto done;
+
+			ppa_gen.ppa = 0;
+			ppa_gen.g.ch = ch;
+			ppa_gen.g.lun = lun;
+			ppa_dev = generic_to_dev_addr(dev, ppa_gen);
+
+			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
+			if (ret)
+				goto done;
+
+			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
+									meta);
+			if (ret)
+				goto done;
+
+			meta += geo->num_chk;
+			nchks -= geo->num_chk;
+		}
+	}
+done:
+	kfree(blks);
+	return ret;
 }
-EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
-int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
-		       u8 *blks)
+int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+		       int nchks, struct nvm_chk_meta *meta)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
 	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
 
-	return dev->ops->get_bb_tbl(dev, ppa, blks);
+	if (dev->geo.version == NVM_OCSSD_SPEC_12)
+		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+
+	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+}
+EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
+
+int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+		       int nr_ppas, int type)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_rq rqd;
+	int ret;
+
+	if (dev->geo.version == NVM_OCSSD_SPEC_20)
+		return 0;
+
+	if (nr_ppas > NVM_MAX_VLBA) {
+		pr_err("nvm: unable to update all blocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
+	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(tgt_dev, &rqd);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
 }
-EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
 
 static int nvm_core_init(struct nvm_dev *dev)
 {
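
One consequence of the 1.2 emulation path worth noting: nvm_get_bb_meta() insists that the request start at block 0 and cover whole LUNs (nchks must be a multiple of geo->num_chk), returning -EINVAL otherwise. A hedged sketch of a conforming request for a single LUN; the helper name, the tgt_dev->geo access, and the allocation strategy are assumptions for illustration:

/* Hypothetical: fetch metadata for exactly one LUN's worth of chunks;
 * nchks must be a multiple of geo->num_chk on the 1.2 path.
 */
static struct nvm_chk_meta *example_lun_meta(struct nvm_tgt_dev *tgt_dev,
					     struct ppa_addr lun_ppa)
{
	struct nvm_geo *geo = &tgt_dev->geo;
	struct nvm_chk_meta *meta;

	meta = kmalloc_array(geo->num_chk, sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return NULL;

	if (nvm_get_chunk_meta(tgt_dev, lun_ppa, geo->num_chk, meta)) {
		kfree(meta);
		return NULL;
	}

	return meta;
}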