author	Matias Bjørling <mb@lightnvm.io>	2018-10-09 07:11:36 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-10-09 10:25:06 -0400
commit	aff3fb18f957de93e629c7d3d2c4ef1f360aa511
tree	9a4d00e5ebdbad312cf5dc4ce3379097010ecb03 /drivers
parent	d8adaa3b86324c6186d0adf74bc256bdacfffdb6
lightnvm: move bad block and chunk state logic to core
pblk implements two data paths for recovering line state: one for the 1.2
specification and another for 2.0. Instead of having pblk implement both,
combine them in the core to reduce complexity and make them available to
other targets.

The new interface adheres to the 2.0 chunk definition, including managing
open chunks with an active write pointer. To provide this interface, a 1.2
device recovers the state of its chunks by manually detecting whether a
chunk is free/open/closed/offline and, if open, scanning the flash pages
sequentially to find the next writeable page. This process takes on average
~10 seconds on a device with 64 dies, 1024 blocks and 60us read access time.
The process could be parallelized, but that is left out for maintenance
simplicity, since the 1.2 specification is deprecated. For 2.0 devices, the
logic is maintained internally in the drive and retrieved through the 2.0
interface.

Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
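As a rough cross-check of the ~10 second figure above, the sketch below is a
back-of-envelope estimate only and is not part of the patch. It assumes the
geometry quoted in the message, that the luns are scanned one after another,
and that most chunks are classified by sensing just their first and last
pages; the extra sequential scan that open chunks need is ignored.

#include <stdio.h>

/* Back-of-envelope estimate of the 1.2 chunk-state recovery time.
 * Assumes the geometry quoted in the commit message and two page
 * reads (first + last) per chunk; open-chunk scans are ignored.
 */
int main(void)
{
	long dies = 64;            /* luns, scanned sequentially */
	long chunks_per_die = 1024;
	long reads_per_chunk = 2;  /* sense first and last page */
	long read_us = 60;         /* read access time per page */

	long total_us = dies * chunks_per_die * reads_per_chunk * read_us;

	/* ~7.9 seconds; the remainder goes to scanning open chunks */
	printf("estimated recovery time: ~%.1f seconds\n", total_us / 1e6);
	return 0;
}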
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/lightnvm/core.c		309
-rw-r--r--	drivers/lightnvm/pblk-core.c	6
-rw-r--r--	drivers/lightnvm/pblk-init.c	116
-rw-r--r--	drivers/lightnvm/pblk.h		2
-rw-r--r--	drivers/nvme/host/lightnvm.c	4
5 files changed, 260 insertions(+), 177 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 964352720a03..8df188e0767e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -717,46 +717,6 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
 	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 }
 
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
-		       struct ppa_addr ppa, int nchks)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-
-	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
-
-	return dev->ops->get_chk_meta(tgt_dev->parent, meta,
-						(sector_t)ppa.ppa, nchks);
-}
-EXPORT_SYMBOL(nvm_get_chunk_meta);
-
-int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
-		       int nr_ppas, int type)
-{
-	struct nvm_dev *dev = tgt_dev->parent;
-	struct nvm_rq rqd;
-	int ret;
-
-	if (nr_ppas > NVM_MAX_VLBA) {
-		pr_err("nvm: unable to update all blocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
-	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(tgt_dev, &rqd);
-	if (ret) {
-		pr_err("nvm: failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
-
 static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
 {
 	int flags = 0;
@@ -830,27 +790,159 @@ void nvm_end_io(struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_end_io);
 
+static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!dev->ops->submit_io_sync)
+		return -ENODEV;
+
+	rqd->flags = nvm_set_flags(&dev->geo, rqd);
+
+	return dev->ops->submit_io_sync(dev, rqd);
+}
+
+static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	struct nvm_rq rqd = { NULL };
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct page *page;
+	int ret;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	bio_init(&bio, &bio_vec, 1);
+	bio_add_page(&bio, page, PAGE_SIZE, 0);
+	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
+	rqd.bio = &bio;
+	rqd.opcode = NVM_OP_PREAD;
+	rqd.is_seq = 1;
+	rqd.nr_ppas = 1;
+	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+	ret = nvm_submit_io_sync_raw(dev, &rqd);
+	if (ret)
+		return ret;
+
+	__free_page(page);
+
+	return rqd.error;
+}
+
 /*
- * folds a bad block list from its plane representation to its virtual
- * block representation. The fold is done in place and reduced size is
- * returned.
- *
- * If any of the planes status are bad or grown bad block, the virtual block
- * is marked bad. If not bad, the first plane state acts as the block state.
+ * Scans a 1.2 chunk first and last page to determine if its state.
+ * If the chunk is found to be open, also scan it to update the write
+ * pointer.
  */
-int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
+			     struct nvm_chk_meta *meta)
 {
 	struct nvm_geo *geo = &dev->geo;
-	int blk, offset, pl, blktype;
+	int ret, pg, pl;
 
-	if (nr_blks != geo->num_chk * geo->pln_mode)
-		return -EINVAL;
+	/* sense first page */
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) /* valid data */
+		meta->state = NVM_CHK_ST_OPEN;
+	else if (ret > 0) {
+		/*
+		 * If empty page, the chunk is free, else it is an
+		 * actual io error. In that case, mark it offline.
+		 */
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+			meta->state = NVM_CHK_ST_FREE;
+			return 0;
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			goto scan;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+	/* sense last page */
+	ppa.g.pg = geo->num_pg - 1;
+	ppa.g.pl = geo->num_pln - 1;
+
+	ret = nvm_bb_chunk_sense(dev, ppa);
+	if (ret < 0) /* io error */
+		return ret;
+	else if (ret == 0) { /* Chunk fully written */
+		meta->state = NVM_CHK_ST_CLOSED;
+		meta->wp = geo->clba;
+		return 0;
+	} else if (ret > 0) {
+		switch (ret) {
+		case NVM_RSP_ERR_EMPTYPAGE:
+		case NVM_RSP_ERR_FAILCRC:
+		case NVM_RSP_ERR_FAILECC:
+		case NVM_RSP_WARN_HIGHECC:
+			meta->state = NVM_CHK_ST_OPEN;
+			break;
+		default:
+			return -ret; /* other io error */
+		}
+	}
+
+scan:
+	/*
+	 * chunk is open, we scan sequentially to update the write pointer.
+	 * We make the assumption that targets write data across all planes
+	 * before moving to the next page.
+	 */
+	for (pg = 0; pg < geo->num_pg; pg++) {
+		for (pl = 0; pl < geo->num_pln; pl++) {
+			ppa.g.pg = pg;
+			ppa.g.pl = pl;
+
+			ret = nvm_bb_chunk_sense(dev, ppa);
+			if (ret < 0) /* io error */
+				return ret;
+			else if (ret == 0) {
+				meta->wp += geo->ws_min;
+			} else if (ret > 0) {
+				switch (ret) {
+				case NVM_RSP_ERR_EMPTYPAGE:
+					return 0;
+				case NVM_RSP_ERR_FAILCRC:
+				case NVM_RSP_ERR_FAILECC:
+				case NVM_RSP_WARN_HIGHECC:
+					meta->wp += geo->ws_min;
+					break;
+				default:
+					return -ret; /* other io error */
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * folds a bad block list from its plane representation to its
+ * chunk representation.
+ *
+ * If any of the planes status are bad or grown bad, the chunk is marked
+ * offline. If not bad, the first plane state acts as the chunk state.
+ */
+static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
+			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	int ret, blk, pl, offset, blktype;
 
 	for (blk = 0; blk < geo->num_chk; blk++) {
 		offset = blk * geo->pln_mode;
 		blktype = blks[offset];
 
-		/* Bad blocks on any planes take precedence over other types */
 		for (pl = 0; pl < geo->pln_mode; pl++) {
 			if (blks[offset + pl] &
 					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
@@ -859,23 +951,124 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 			}
 		}
 
-		blks[blk] = blktype;
+		ppa.g.blk = blk;
+
+		meta->wp = 0;
+		meta->type = NVM_CHK_TP_W_SEQ;
+		meta->wi = 0;
+		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
+		meta->cnlb = dev->geo.clba;
+
+		if (blktype == NVM_BLK_T_FREE) {
+			ret = nvm_bb_chunk_scan(dev, ppa, meta);
+			if (ret)
+				return ret;
+		} else {
+			meta->state = NVM_CHK_ST_OFFLINE;
+		}
+
+		meta++;
 	}
 
-	return geo->num_chk;
+	return 0;
+}
+
+static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
+			   int nchks, struct nvm_chk_meta *meta)
+{
+	struct nvm_geo *geo = &dev->geo;
+	struct ppa_addr ppa;
+	u8 *blks;
+	int ch, lun, nr_blks;
+	int ret;
+
+	ppa.ppa = slba;
+	ppa = dev_to_generic_addr(dev, ppa);
+
+	if (ppa.g.blk != 0)
+		return -EINVAL;
+
+	if ((nchks % geo->num_chk) != 0)
+		return -EINVAL;
+
+	nr_blks = geo->num_chk * geo->pln_mode;
+
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
+
+	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
+		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
+			struct ppa_addr ppa_gen, ppa_dev;
+
+			if (!nchks)
+				goto done;
+
+			ppa_gen.ppa = 0;
+			ppa_gen.g.ch = ch;
+			ppa_gen.g.lun = lun;
+			ppa_dev = generic_to_dev_addr(dev, ppa_gen);
+
+			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
+			if (ret)
+				goto done;
+
+			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
+								meta);
+			if (ret)
+				goto done;
+
+			meta += geo->num_chk;
+			nchks -= geo->num_chk;
+		}
+	}
+done:
+	kfree(blks);
+	return ret;
 }
-EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
-int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
-		       u8 *blks)
+int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+		       int nchks, struct nvm_chk_meta *meta)
 {
 	struct nvm_dev *dev = tgt_dev->parent;
 
 	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
 
-	return dev->ops->get_bb_tbl(dev, ppa, blks);
+	if (dev->geo.version == NVM_OCSSD_SPEC_12)
+		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+
+	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+}
+EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
+
+int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+		       int nr_ppas, int type)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_rq rqd;
+	int ret;
+
+	if (dev->geo.version == NVM_OCSSD_SPEC_20)
+		return 0;
+
+	if (nr_ppas > NVM_MAX_VLBA) {
+		pr_err("nvm: unable to update all blocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
+	nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(tgt_dev, &rqd);
+	if (ret)
+		return -EINVAL;
+
+	return 0;
 }
-EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
 
 static int nvm_core_init(struct nvm_dev *dev)
 {
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 72de7456845b..e0b513d07e14 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -27,7 +27,7 @@ static void pblk_line_mark_bb(struct work_struct *work)
 	struct ppa_addr *ppa = line_ws->priv;
 	int ret;
 
-	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+	ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
 	if (ret) {
 		struct pblk_line *line;
 		int pos;
@@ -110,7 +110,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
  *
  * The caller is responsible for freeing the returned structure
  */
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
@@ -126,7 +126,7 @@ struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
 	if (!meta)
 		return ERR_PTR(-ENOMEM);
 
-	ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
+	ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
 	if (ret) {
 		kfree(meta);
 		return ERR_PTR(-EIO);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 039f62d05e84..53bd52114aee 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -540,67 +540,6 @@ static void pblk_lines_free(struct pblk *pblk)
 	kfree(pblk->lines);
 }
 
-static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
-			   u8 *blks, int nr_blks)
-{
-	struct ppa_addr ppa;
-	int ret;
-
-	ppa.ppa = 0;
-	ppa.g.ch = rlun->bppa.g.ch;
-	ppa.g.lun = rlun->bppa.g.lun;
-
-	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
-	if (ret)
-		return ret;
-
-	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
-	if (nr_blks < 0)
-		return -EIO;
-
-	return 0;
-}
-
-static void *pblk_bb_get_meta(struct pblk *pblk)
-{
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
-	u8 *meta;
-	int i, nr_blks, blk_per_lun;
-	int ret;
-
-	blk_per_lun = geo->num_chk * geo->pln_mode;
-	nr_blks = blk_per_lun * geo->all_luns;
-
-	meta = kmalloc(nr_blks, GFP_KERNEL);
-	if (!meta)
-		return ERR_PTR(-ENOMEM);
-
-	for (i = 0; i < geo->all_luns; i++) {
-		struct pblk_lun *rlun = &pblk->luns[i];
-		u8 *meta_pos = meta + i * blk_per_lun;
-
-		ret = pblk_bb_get_tbl(dev, rlun, meta_pos, blk_per_lun);
-		if (ret) {
-			kfree(meta);
-			return ERR_PTR(-EIO);
-		}
-	}
-
-	return meta;
-}
-
-static void *pblk_chunk_get_meta(struct pblk *pblk)
-{
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
-
-	if (geo->version == NVM_OCSSD_SPEC_12)
-		return pblk_bb_get_meta(pblk);
-	else
-		return pblk_chunk_get_info(pblk);
-}
-
 static int pblk_luns_init(struct pblk *pblk)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
@@ -699,51 +638,7 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
 	atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
 }
 
-static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
-				   void *chunk_meta)
-{
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
-	struct pblk_line_meta *lm = &pblk->lm;
-	int i, chk_per_lun, nr_bad_chks = 0;
-
-	chk_per_lun = geo->num_chk * geo->pln_mode;
-
-	for (i = 0; i < lm->blk_per_line; i++) {
-		struct pblk_lun *rlun = &pblk->luns[i];
-		struct nvm_chk_meta *chunk;
-		int pos = pblk_ppa_to_pos(geo, rlun->bppa);
-		u8 *lun_bb_meta = chunk_meta + pos * chk_per_lun;
-
-		chunk = &line->chks[pos];
-
-		/*
-		 * In 1.2 spec. chunk state is not persisted by the device. Thus
-		 * some of the values are reset each time pblk is instantiated,
-		 * so we have to assume that the block is closed.
-		 */
-		if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
-			chunk->state = NVM_CHK_ST_CLOSED;
-		else
-			chunk->state = NVM_CHK_ST_OFFLINE;
-
-		chunk->type = NVM_CHK_TP_W_SEQ;
-		chunk->wi = 0;
-		chunk->slba = -1;
-		chunk->cnlb = geo->clba;
-		chunk->wp = 0;
-
-		if (!(chunk->state & NVM_CHK_ST_OFFLINE))
-			continue;
-
-		set_bit(pos, line->blk_bitmap);
-		nr_bad_chks++;
-	}
-
-	return nr_bad_chks;
-}
-
-static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
+static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
 				   struct nvm_chk_meta *meta)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
@@ -790,8 +685,6 @@ static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
 static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
 				 void *chunk_meta, int line_id)
 {
-	struct nvm_tgt_dev *dev = pblk->dev;
-	struct nvm_geo *geo = &dev->geo;
 	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
 	struct pblk_line_meta *lm = &pblk->lm;
 	long nr_bad_chks, chk_in_line;
@@ -804,10 +697,7 @@ static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
 	line->vsc = &l_mg->vsc_list[line_id];
 	spin_lock_init(&line->lock);
 
-	if (geo->version == NVM_OCSSD_SPEC_12)
-		nr_bad_chks = pblk_setup_line_meta_12(pblk, line, chunk_meta);
-	else
-		nr_bad_chks = pblk_setup_line_meta_20(pblk, line, chunk_meta);
+	nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);
 
 	chk_in_line = lm->blk_per_line - nr_bad_chks;
 	if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
@@ -1058,7 +948,7 @@ static int pblk_lines_init(struct pblk *pblk)
 	if (ret)
 		goto fail_free_meta;
 
-	chunk_meta = pblk_chunk_get_meta(pblk);
+	chunk_meta = pblk_get_chunk_meta(pblk);
 	if (IS_ERR(chunk_meta)) {
 		ret = PTR_ERR(chunk_meta);
 		goto fail_free_luns;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 48b3035df3c4..579b4ea9716c 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -774,7 +774,7 @@ void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
 			struct pblk_c_ctx *c_ctx);
 void pblk_discard(struct pblk *pblk, struct bio *bio);
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
 struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
 					struct nvm_chk_meta *lp,
 					struct ppa_addr ppa);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 1e4f97538838..e42af7771fe5 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -567,8 +567,8 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
  * Expect the lba in device format
  */
 static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
-				 struct nvm_chk_meta *meta,
-				 sector_t slba, int nchks)
+				 sector_t slba, int nchks,
+				 struct nvm_chk_meta *meta)
 {
 	struct nvm_geo *geo = &ndev->geo;
 	struct nvme_ns *ns = ndev->q->queuedata;