Diffstat (limited to 'drivers/lightnvm/pblk.h')
-rw-r--r--  drivers/lightnvm/pblk.h  | 304
1 file changed, 220 insertions(+), 84 deletions(-)
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 8c357fb6538e..9c682acfc5d1 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -201,12 +201,6 @@ struct pblk_rb {
 
 struct pblk_lun {
         struct ppa_addr bppa;
-
-        u8 *bb_list;                    /* Bad block list for LUN. Only used on
-                                         * bring up. Bad blocks are managed
-                                         * within lines on run-time.
-                                         */
-
         struct semaphore wr_sem;
 };
 
@@ -303,6 +297,7 @@ enum {
         PBLK_LINETYPE_DATA = 2,
 
         /* Line state */
+        PBLK_LINESTATE_NEW = 9,
         PBLK_LINESTATE_FREE = 10,
         PBLK_LINESTATE_OPEN = 11,
         PBLK_LINESTATE_CLOSED = 12,
@@ -320,14 +315,26 @@ enum {
 };
 
 #define PBLK_MAGIC 0x70626c6b /*pblk*/
-#define SMETA_VERSION cpu_to_le16(1)
+
+/* emeta/smeta persistent storage format versions:
+ * Changes in major version requires offline migration.
+ * Changes in minor version are handled automatically during
+ * recovery.
+ */
+
+#define SMETA_VERSION_MAJOR (0)
+#define SMETA_VERSION_MINOR (1)
+
+#define EMETA_VERSION_MAJOR (0)
+#define EMETA_VERSION_MINOR (2)
 
 struct line_header {
         __le32 crc;
         __le32 identifier;      /* pblk identifier */
         __u8 uuid[16];          /* instance uuid */
         __le16 type;            /* line type */
-        __le16 version;         /* type version */
+        __u8 version_major;     /* version major */
+        __u8 version_minor;     /* version minor */
         __le32 id;              /* line id for current line */
 };
 
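The comment block above states the compatibility policy: a major-version mismatch requires offline migration, while minor-version differences are absorbed during recovery. A minimal userspace-style sketch of that rule, assuming only the macros introduced in this hunk (the helper name and the error reporting are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define SMETA_VERSION_MAJOR 0
#define SMETA_VERSION_MINOR 1

/* Illustrative check of a recovered smeta header's version fields:
 * refuse a different major version, tolerate an older-or-equal minor.
 */
static int smeta_version_supported(uint8_t major, uint8_t minor)
{
        if (major != SMETA_VERSION_MAJOR) {
                fprintf(stderr, "smeta major %u needs offline migration\n", major);
                return 0;
        }
        if (minor > SMETA_VERSION_MINOR) {
                fprintf(stderr, "smeta minor %u is newer than this driver\n", minor);
                return 0;
        }
        return 1;
}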
@@ -349,11 +356,13 @@ struct line_smeta {
         __le64 lun_bitmap[];
 };
 
+
 /*
  * Metadata layout in media:
  * First sector:
  *     1. struct line_emeta
  *     2. bad block bitmap (u64 * window_wr_lun)
+ *     3. write amplification counters
  * Mid sectors (start at lbas_sector):
  *     3. nr_lbas (u64) forming lba list
  * Last sectors (start at vsc_sector):
@@ -377,7 +386,15 @@ struct line_emeta {
         __le32 next_id;                 /* Line id for next line */
         __le64 nr_lbas;                 /* Number of lbas mapped in line */
         __le64 nr_valid_lbas;           /* Number of valid lbas mapped in line */
         __le64 bb_bitmap[];             /* Updated bad block bitmap for line */
+};
+
+
+/* Write amplification counters stored on media */
+struct wa_counters {
+        __le64 user;                    /* Number of user written sectors */
+        __le64 gc;                      /* Number of sectors written by GC*/
+        __le64 pad;                     /* Number of padded sectors */
 };
 
 struct pblk_emeta {
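struct wa_counters persists the three classes of media writes pblk issues: user data, garbage-collection rewrites, and padding. Write amplification is conventionally the ratio of total sectors written to media over sectors written by the user, so these fields are enough to recompute it after a restart. A minimal sketch of that arithmetic (the helper is illustrative, not part of the patch):

#include <stdint.h>

/* WA = (user + gc + pad) / user, guarding against a zero user count. */
static double wa_from_counters(uint64_t user, uint64_t gc, uint64_t pad)
{
        if (!user)
                return 0.0;
        return (double)(user + gc + pad) / (double)user;
}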
@@ -410,6 +427,8 @@ struct pblk_line {
 
         unsigned long *lun_bitmap;      /* Bitmap for LUNs mapped in line */
 
+        struct nvm_chk_meta *chks;      /* Chunks forming line */
+
         struct pblk_smeta *smeta;       /* Start metadata */
         struct pblk_emeta *emeta;       /* End medatada */
 
@@ -507,10 +526,11 @@ struct pblk_line_meta {
         unsigned int smeta_sec;         /* Sectors needed for smeta */
 
         unsigned int emeta_len[4];      /* Lengths for emeta:
-                                         * [0]: Total length
-                                         * [1]: struct line_emeta length
-                                         * [2]: L2P portion length
-                                         * [3]: vsc list length
+                                         * [0]: Total
+                                         * [1]: struct line_emeta +
+                                         *      bb_bitmap + struct wa_counters
+                                         * [2]: L2P portion
+                                         * [3]: vsc
                                          */
         unsigned int emeta_sec[4];      /* Sectors needed for emeta. Same layout
                                          * as emeta_len
@@ -534,21 +554,6 @@ struct pblk_line_meta {
         unsigned int meta_distance;     /* Distance between data and metadata */
 };
 
-struct pblk_addr_format {
-        u64 ch_mask;
-        u64 lun_mask;
-        u64 pln_mask;
-        u64 blk_mask;
-        u64 pg_mask;
-        u64 sec_mask;
-        u8 ch_offset;
-        u8 lun_offset;
-        u8 pln_offset;
-        u8 blk_offset;
-        u8 pg_offset;
-        u8 sec_offset;
-};
-
 enum {
         PBLK_STATE_RUNNING = 0,
         PBLK_STATE_STOPPING = 1,
@@ -556,6 +561,18 @@
         PBLK_STATE_STOPPED = 3,
 };
 
+/* Internal format to support not power-of-2 device formats */
+struct pblk_addrf {
+        /* gen to dev */
+        int sec_stripe;
+        int ch_stripe;
+        int lun_stripe;
+
+        /* dev to gen */
+        int sec_lun_stripe;
+        int sec_ws_stripe;
+};
+
 struct pblk {
         struct nvm_tgt_dev *dev;
         struct gendisk *disk;
@@ -568,8 +585,9 @@ struct pblk {
         struct pblk_line_mgmt l_mg;     /* Line management */
         struct pblk_line_meta lm;       /* Line metadata */
 
-        int ppaf_bitsize;
-        struct pblk_addr_format ppaf;
+        struct nvm_addrf addrf;         /* Aligned address format */
+        struct pblk_addrf uaddrf;       /* Unaligned address format */
+        int addrf_len;
 
         struct pblk_rb rwb;
 
@@ -592,12 +610,27 @@ struct pblk {
         int sec_per_write;
 
         unsigned char instance_uuid[16];
+
+        /* Persistent write amplification counters, 4kb sector I/Os */
+        atomic64_t user_wa;             /* Sectors written by user */
+        atomic64_t gc_wa;               /* Sectors written by GC */
+        atomic64_t pad_wa;              /* Padded sectors written */
+
+        /* Reset values for delta write amplification measurements */
+        u64 user_rst_wa;
+        u64 gc_rst_wa;
+        u64 pad_rst_wa;
+
+        /* Counters used for calculating padding distribution */
+        atomic64_t *pad_dist;           /* Padding distribution buckets */
+        u64 nr_flush_rst;               /* Flushes reset value for pad dist.*/
+        atomic64_t nr_flush;            /* Number of flush/fua I/O */
+
 #ifdef CONFIG_NVM_DEBUG
-        /* All debug counters apply to 4kb sector I/Os */
+        /* Non-persistent debug counters, 4kb sector I/Os */
         atomic_long_t inflight_writes;  /* Inflight writes (user and gc) */
         atomic_long_t padded_writes;    /* Sectors padded due to flush/fua */
         atomic_long_t padded_wb;        /* Sectors padded in write buffer */
-        atomic_long_t nr_flush;         /* Number of flush/fua I/O */
         atomic_long_t req_writes;       /* Sectors stored on write buffer */
         atomic_long_t sub_writes;       /* Sectors submitted from buffer */
         atomic_long_t sync_writes;      /* Sectors synced to media */
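The atomic counters above persist across restarts (through struct wa_counters in emeta), while the *_rst_wa fields hold a snapshot taken at the last reset so write amplification can be reported for the interval since that point rather than for the whole device lifetime. A sketch of the delta arithmetic under that reading of the fields (illustrative helper, not part of the patch):

#include <stdint.h>

/* Interval WA: subtract the reset-time snapshot from the current counters. */
static double wa_since_reset(uint64_t user, uint64_t user_rst,
                             uint64_t gc, uint64_t gc_rst,
                             uint64_t pad, uint64_t pad_rst)
{
        uint64_t du = user - user_rst;
        uint64_t dg = gc - gc_rst;
        uint64_t dp = pad - pad_rst;

        return du ? (double)(du + dg + dp) / (double)du : 0.0;
}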
@@ -712,6 +745,10 @@ void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
 int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
                         struct pblk_c_ctx *c_ctx);
 void pblk_discard(struct pblk *pblk, struct bio *bio);
+struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
+struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
+                                        struct nvm_chk_meta *lp,
+                                        struct ppa_addr ppa);
 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
@@ -888,6 +925,12 @@ static inline void *emeta_to_bb(struct line_emeta *emeta)
         return emeta->bb_bitmap;
 }
 
+static inline void *emeta_to_wa(struct pblk_line_meta *lm,
+                                struct line_emeta *emeta)
+{
+        return emeta->bb_bitmap + lm->blk_bitmap_len;
+}
+
 static inline void *emeta_to_lbas(struct pblk *pblk, struct line_emeta *emeta)
 {
         return ((void *)emeta + pblk->lm.emeta_len[1]);
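emeta_to_wa() matches the first-sector layout documented earlier: struct line_emeta comes first, the flexible bb_bitmap[] follows it for blk_bitmap_len bytes, and the write amplification counters sit directly after the bitmap. With illustrative numbers only, if the fixed part of line_emeta ends at byte 512 and blk_bitmap_len is 64, the three __le64 counters occupy bytes 576 through 599 of the first emeta sector.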
@@ -903,38 +946,60 @@ static inline int pblk_line_vsc(struct pblk_line *line)
         return le32_to_cpu(*line->vsc);
 }
 
-#define NVM_MEM_PAGE_WRITE (8)
-
 static inline int pblk_pad_distance(struct pblk *pblk)
 {
         struct nvm_tgt_dev *dev = pblk->dev;
         struct nvm_geo *geo = &dev->geo;
 
-        return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
+        return geo->mw_cunits * geo->all_luns * geo->ws_opt;
 }
 
 static inline int pblk_ppa_to_line(struct ppa_addr p)
 {
-        return p.g.blk;
+        return p.a.blk;
 }
 
 static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
 {
-        return p.g.lun * geo->nr_chnls + p.g.ch;
+        return p.a.lun * geo->num_ch + p.a.ch;
 }
 
 static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
                                               u64 line_id)
 {
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
         struct ppa_addr ppa;
 
-        ppa.ppa = 0;
-        ppa.g.blk = line_id;
-        ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
-        ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
-        ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
-        ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
-        ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
+        if (geo->version == NVM_OCSSD_SPEC_12) {
+                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
+
+                ppa.ppa = 0;
+                ppa.g.blk = line_id;
+                ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
+                ppa.g.lun = (paddr & ppaf->lun_mask) >> ppaf->lun_offset;
+                ppa.g.ch = (paddr & ppaf->ch_mask) >> ppaf->ch_offset;
+                ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
+                ppa.g.sec = (paddr & ppaf->sec_mask) >> ppaf->sec_offset;
+        } else {
+                struct pblk_addrf *uaddrf = &pblk->uaddrf;
+                int secs, chnls, luns;
+
+                ppa.ppa = 0;
+
+                ppa.m.chk = line_id;
+
+                paddr = div_u64_rem(paddr, uaddrf->sec_stripe, &secs);
+                ppa.m.sec = secs;
+
+                paddr = div_u64_rem(paddr, uaddrf->ch_stripe, &chnls);
+                ppa.m.grp = chnls;
+
+                paddr = div_u64_rem(paddr, uaddrf->lun_stripe, &luns);
+                ppa.m.pu = luns;
+
+                ppa.m.sec += uaddrf->sec_stripe * paddr;
+        }
 
         return ppa;
 }
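For 1.2 geometries the mapping stays a mask-and-shift, but for 2.0 geometries addr_to_gen_ppa() walks the line address arithmetically: sectors stripe first within a chunk (sec_stripe at a time), then across groups (channels), then across parallel units (LUNs), and any remainder advances the in-chunk sector offset in whole stripes. A worked example with illustrative stripe settings sec_stripe = 4, ch_stripe = 2, lun_stripe = 2 and paddr = 45: 45 = 11 * 4 + 1 gives sec = 1; 11 = 5 * 2 + 1 gives grp = 1; 5 = 2 * 2 + 1 gives pu = 1; the remaining 2 full stripes add 2 * 4 = 8 sectors, so the final address is (grp 1, pu 1, chk line_id, sec 9).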
@@ -942,13 +1007,30 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
 static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
                                             struct ppa_addr p)
 {
+        struct nvm_tgt_dev *dev = pblk->dev;
+        struct nvm_geo *geo = &dev->geo;
         u64 paddr;
 
-        paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
-        paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
-        paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
-        paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
-        paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+        if (geo->version == NVM_OCSSD_SPEC_12) {
+                struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
+
+                paddr = (u64)p.g.ch << ppaf->ch_offset;
+                paddr |= (u64)p.g.lun << ppaf->lun_offset;
+                paddr |= (u64)p.g.pg << ppaf->pg_offset;
+                paddr |= (u64)p.g.pl << ppaf->pln_offset;
+                paddr |= (u64)p.g.sec << ppaf->sec_offset;
+        } else {
+                struct pblk_addrf *uaddrf = &pblk->uaddrf;
+                u64 secs = p.m.sec;
+                int sec_stripe;
+
+                paddr = (u64)p.m.grp * uaddrf->sec_stripe;
+                paddr += (u64)p.m.pu * uaddrf->sec_lun_stripe;
+
+                secs = div_u64_rem(secs, uaddrf->sec_stripe, &sec_stripe);
+                paddr += secs * uaddrf->sec_ws_stripe;
+                paddr += sec_stripe;
+        }
 
         return paddr;
 }
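pblk_dev_ppa_to_line_addr() is the inverse walk. It only round-trips if sec_lun_stripe and sec_ws_stripe are the precomputed products sec_stripe * ch_stripe and sec_stripe * ch_stripe * lun_stripe, which is how the field names read; treat that as an assumption here. Continuing the example above, (grp 1, pu 1, sec 9) with sec_stripe = 4 gives paddr = 1 * 4 + 1 * 8 + (9 / 4) * 16 + (9 % 4) = 4 + 8 + 32 + 1 = 45, recovering the original line address.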
@@ -965,18 +1047,37 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
                 ppa64.c.line = ppa32 & ((~0U) >> 1);
                 ppa64.c.is_cached = 1;
         } else {
-                ppa64.g.blk = (ppa32 & pblk->ppaf.blk_mask) >>
-                                                        pblk->ppaf.blk_offset;
-                ppa64.g.pg = (ppa32 & pblk->ppaf.pg_mask) >>
-                                                        pblk->ppaf.pg_offset;
-                ppa64.g.lun = (ppa32 & pblk->ppaf.lun_mask) >>
-                                                        pblk->ppaf.lun_offset;
-                ppa64.g.ch = (ppa32 & pblk->ppaf.ch_mask) >>
-                                                        pblk->ppaf.ch_offset;
-                ppa64.g.pl = (ppa32 & pblk->ppaf.pln_mask) >>
-                                                        pblk->ppaf.pln_offset;
-                ppa64.g.sec = (ppa32 & pblk->ppaf.sec_mask) >>
-                                                        pblk->ppaf.sec_offset;
+                struct nvm_tgt_dev *dev = pblk->dev;
+                struct nvm_geo *geo = &dev->geo;
+
+                if (geo->version == NVM_OCSSD_SPEC_12) {
+                        struct nvm_addrf_12 *ppaf =
+                                (struct nvm_addrf_12 *)&pblk->addrf;
+
+                        ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
+                                                        ppaf->ch_offset;
+                        ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
+                                                        ppaf->lun_offset;
+                        ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
+                                                        ppaf->blk_offset;
+                        ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
+                                                        ppaf->pg_offset;
+                        ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
+                                                        ppaf->pln_offset;
+                        ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
+                                                        ppaf->sec_offset;
+                } else {
+                        struct nvm_addrf *lbaf = &pblk->addrf;
+
+                        ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
+                                                        lbaf->ch_offset;
+                        ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
+                                                        lbaf->lun_offset;
+                        ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
+                                                        lbaf->chk_offset;
+                        ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
+                                                        lbaf->sec_offset;
+                }
         }
 
         return ppa64;
@@ -992,12 +1093,27 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
                 ppa32 |= ppa64.c.line;
                 ppa32 |= 1U << 31;
         } else {
-                ppa32 |= ppa64.g.blk << pblk->ppaf.blk_offset;
-                ppa32 |= ppa64.g.pg << pblk->ppaf.pg_offset;
-                ppa32 |= ppa64.g.lun << pblk->ppaf.lun_offset;
-                ppa32 |= ppa64.g.ch << pblk->ppaf.ch_offset;
-                ppa32 |= ppa64.g.pl << pblk->ppaf.pln_offset;
-                ppa32 |= ppa64.g.sec << pblk->ppaf.sec_offset;
+                struct nvm_tgt_dev *dev = pblk->dev;
+                struct nvm_geo *geo = &dev->geo;
+
+                if (geo->version == NVM_OCSSD_SPEC_12) {
+                        struct nvm_addrf_12 *ppaf =
+                                (struct nvm_addrf_12 *)&pblk->addrf;
+
+                        ppa32 |= ppa64.g.ch << ppaf->ch_offset;
+                        ppa32 |= ppa64.g.lun << ppaf->lun_offset;
+                        ppa32 |= ppa64.g.blk << ppaf->blk_offset;
+                        ppa32 |= ppa64.g.pg << ppaf->pg_offset;
+                        ppa32 |= ppa64.g.pl << ppaf->pln_offset;
+                        ppa32 |= ppa64.g.sec << ppaf->sec_offset;
+                } else {
+                        struct nvm_addrf *lbaf = &pblk->addrf;
+
+                        ppa32 |= ppa64.m.grp << lbaf->ch_offset;
+                        ppa32 |= ppa64.m.pu << lbaf->lun_offset;
+                        ppa32 |= ppa64.m.chk << lbaf->chk_offset;
+                        ppa32 |= ppa64.m.sec << lbaf->sec_offset;
+                }
         }
 
         return ppa32;
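Both packed forms exist because the L2P translation map keeps one entry per mapped sector. When the device address format fits in fewer than 32 bits (the addrf_len < 32 test in the accessors below), entries can be stored as u32 instead of the full 64-bit ppa_addr, halving the map's memory footprint. As a rough illustration, assuming 4 KB mapping granularity, a 2 TB target has about 500 million entries: roughly 2 GB of map at 4 bytes per entry versus 4 GB at 8 bytes.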
@@ -1008,7 +1124,7 @@ static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
 {
         struct ppa_addr ppa;
 
-        if (pblk->ppaf_bitsize < 32) {
+        if (pblk->addrf_len < 32) {
                 u32 *map = (u32 *)pblk->trans_map;
 
                 ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
@@ -1024,7 +1140,7 @@ static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
 static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
                                       struct ppa_addr ppa)
 {
-        if (pblk->ppaf_bitsize < 32) {
+        if (pblk->addrf_len < 32) {
                 u32 *map = (u32 *)pblk->trans_map;
 
                 map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
@@ -1115,7 +1231,10 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
         struct nvm_geo *geo = &dev->geo;
         int flags;
 
-        flags = geo->plane_mode >> 1;
+        if (geo->version == NVM_OCSSD_SPEC_20)
+                return 0;
+
+        flags = geo->pln_mode >> 1;
 
         if (type == PBLK_WRITE)
                 flags |= NVM_IO_SCRAMBLE_ENABLE;
@@ -1134,9 +1253,12 @@ static inline int pblk_set_read_mode(struct pblk *pblk, int type)
         struct nvm_geo *geo = &dev->geo;
         int flags;
 
+        if (geo->version == NVM_OCSSD_SPEC_20)
+                return 0;
+
         flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
         if (type == PBLK_READ_SEQUENTIAL)
-                flags |= geo->plane_mode >> 1;
+                flags |= geo->pln_mode >> 1;
 
         return flags;
 }
@@ -1147,16 +1269,21 @@ static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
 }
 
 #ifdef CONFIG_NVM_DEBUG
-static inline void print_ppa(struct ppa_addr *p, char *msg, int error)
+static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p,
+                             char *msg, int error)
 {
         if (p->c.is_cached) {
                 pr_err("ppa: (%s: %x) cache line: %llu\n",
                                 msg, error, (u64)p->c.line);
-        } else {
+        } else if (geo->version == NVM_OCSSD_SPEC_12) {
                 pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n",
                         msg, error,
                         p->g.ch, p->g.lun, p->g.blk,
                         p->g.pg, p->g.pl, p->g.sec);
+        } else {
+                pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n",
+                        msg, error,
+                        p->m.grp, p->m.pu, p->m.chk, p->m.sec);
         }
 }
 
@@ -1166,13 +1293,13 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
         int bit = -1;
 
         if (rqd->nr_ppas == 1) {
-                print_ppa(&rqd->ppa_addr, "rqd", error);
+                print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error);
                 return;
         }
 
         while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas,
                                 bit + 1)) < rqd->nr_ppas) {
-                print_ppa(&rqd->ppa_list[bit], "rqd", error);
+                print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error);
         }
 
         pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
@@ -1188,16 +1315,25 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
         for (i = 0; i < nr_ppas; i++) {
                 ppa = &ppas[i];
 
-                if (!ppa->c.is_cached &&
-                        ppa->g.ch < geo->nr_chnls &&
-                        ppa->g.lun < geo->nr_luns &&
-                        ppa->g.pl < geo->nr_planes &&
-                        ppa->g.blk < geo->nr_chks &&
-                        ppa->g.pg < geo->ws_per_chk &&
-                        ppa->g.sec < geo->sec_per_pg)
-                        continue;
+                if (geo->version == NVM_OCSSD_SPEC_12) {
+                        if (!ppa->c.is_cached &&
+                            ppa->g.ch < geo->num_ch &&
+                            ppa->g.lun < geo->num_lun &&
+                            ppa->g.pl < geo->num_pln &&
+                            ppa->g.blk < geo->num_chk &&
+                            ppa->g.pg < geo->num_pg &&
+                            ppa->g.sec < geo->ws_min)
+                                continue;
+                } else {
+                        if (!ppa->c.is_cached &&
+                            ppa->m.grp < geo->num_ch &&
+                            ppa->m.pu < geo->num_lun &&
+                            ppa->m.chk < geo->num_chk &&
+                            ppa->m.sec < geo->clba)
+                                continue;
+                }
 
-                print_ppa(ppa, "boundary", i);
+                print_ppa(geo, ppa, "boundary", i);
 
                 return 1;
         }