author     Arun Easi <arun.easi@qlogic.com>             2011-08-16 14:29:22 -0400
committer  James Bottomley <JBottomley@Parallels.com>   2011-08-27 09:55:46 -0400
commit     8cb2049c744809193ed3707a37c09676a24599ee
tree       b9523a9a1be8f2610547036d849880d588f0a694 /drivers/scsi
parent     01350d05539d1c95ef3568d062d864ab76ae7670
[SCSI] qla2xxx: T10 DIF - Handle uninitialized sectors.
The driver needs to update the protection bytes for uninitialized sectors, as
those sectors are not DMA'd.
Signed-off-by: Arun Easi <arun.easi@qlogic.com>
Reviewed-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
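In short: when the firmware raises a DIF error for a block whose returned application tag is 0xffff (and, for Type 3, whose reference tag is 0xffffffff), that block was never initialized and carries no protection data, so the ISR change below completes the command successfully and patches the corresponding tuple in the host protection buffer to the "ignore" values. The stand-alone C sketch below illustrates that idea on a flat buffer; struct dif_tuple and mark_uninitialized are illustrative names only, and this marks every remaining tuple where the driver itself walks the protection scatterlist and patches the one block the firmware reported (see qla2x00_handle_dif_error() in the qla_isr.c hunk).

```c
/*
 * Illustrative user-space sketch only -- not the driver code. Shows the
 * idea behind the fix: tuples for sectors that were never DMA-d are set
 * to the "ignore" values so a later verification pass skips them.
 */
#include <stdint.h>

struct dif_tuple {		/* 8-byte T10 DIF tuple per sector */
	uint16_t guard;
	uint16_t app_tag;
	uint32_t ref_tag;
};

static void mark_uninitialized(struct dif_tuple *prot, unsigned int nsectors,
			       unsigned int first_uninit, int dif_type3)
{
	for (unsigned int i = first_uninit; i < nsectors; i++) {
		prot[i].app_tag = 0xffff;		/* "don't check" app tag */
		if (dif_type3)
			prot[i].ref_tag = 0xffffffff;	/* Type 3: ignore ref tag too */
	}
}
```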
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c   |   5
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h     |   5
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h |  21
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c   | 233
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c    |  90
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c     |  19
6 files changed, 335 insertions(+), 38 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7836eb01c7fc..810067099801 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1788,11 +1788,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1788 | 1788 | ||
1789 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { | 1789 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { |
1790 | if (ha->fw_attributes & BIT_4) { | 1790 | if (ha->fw_attributes & BIT_4) { |
1791 | int prot = 0; | ||
1791 | vha->flags.difdix_supported = 1; | 1792 | vha->flags.difdix_supported = 1; |
1792 | ql_dbg(ql_dbg_user, vha, 0x7082, | 1793 | ql_dbg(ql_dbg_user, vha, 0x7082, |
1793 | "Registered for DIF/DIX type 1 and 3 protection.\n"); | 1794 | "Registered for DIF/DIX type 1 and 3 protection.\n"); |
1795 | if (ql2xenabledif == 1) | ||
1796 | prot = SHOST_DIX_TYPE0_PROTECTION; | ||
1794 | scsi_host_set_prot(vha->host, | 1797 | scsi_host_set_prot(vha->host, |
1795 | SHOST_DIF_TYPE1_PROTECTION | 1798 | prot | SHOST_DIF_TYPE1_PROTECTION |
1796 | | SHOST_DIF_TYPE2_PROTECTION | 1799 | | SHOST_DIF_TYPE2_PROTECTION |
1797 | | SHOST_DIF_TYPE3_PROTECTION | 1800 | | SHOST_DIF_TYPE3_PROTECTION |
1798 | | SHOST_DIX_TYPE1_PROTECTION | 1801 | | SHOST_DIX_TYPE1_PROTECTION |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 691783abfb69..aa69486dc064 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -537,6 +537,11 @@ struct sts_entry_24xx {
537 | /* | 537 | /* |
538 | * If DIF Error is set in comp_status, these additional fields are | 538 | * If DIF Error is set in comp_status, these additional fields are |
539 | * defined: | 539 | * defined: |
540 | * | ||
541 | * !!! NOTE: Firmware sends expected/actual DIF data in big endian | ||
542 | * format; but all of the "data" field gets swab32-d in the beginning | ||
543 | * of qla2x00_status_entry(). | ||
544 | * | ||
540 | * &data[10] : uint8_t report_runt_bg[2]; - computed guard | 545 | * &data[10] : uint8_t report_runt_bg[2]; - computed guard |
541 | * &data[12] : uint8_t actual_dif[8]; - DIF Data received | 546 | * &data[12] : uint8_t actual_dif[8]; - DIF Data received |
542 | * &data[20] : uint8_t expected_dif[8]; - DIF Data computed | 547 | * &data[20] : uint8_t expected_dif[8]; - DIF Data computed |
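The note above matters for the qla_isr.c change later in this patch: because qla2x00_status_entry() byte-swaps the whole data[] area 32 bits at a time, the big-endian tuple the firmware wrote ([guard][app tag][ref tag]) ends up with the app tag at offset 0, the guard at offset 2, and the ref tag readable as little endian at offset 4. A small stand-alone demo of that effect is sketched below (it assumes a little-endian host; swab32() here is a local helper mirroring the kernel's byte swap, and the tuple values are made up).

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swab32(uint32_t x)		/* byte-reverse a 32-bit word */
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/* Big-endian DIF tuple as the firmware sends it:
	 * guard=0x1234, app tag=0xffff, ref tag=0x00000010 */
	uint8_t raw[8] = { 0x12, 0x34, 0xff, 0xff, 0x00, 0x00, 0x00, 0x10 };
	uint32_t w[2];
	uint16_t app, guard;
	uint32_t ref;

	memcpy(w, raw, sizeof(raw));
	w[0] = swab32(w[0]);			/* what the status-entry path does */
	w[1] = swab32(w[1]);
	memcpy(raw, w, sizeof(raw));

	memcpy(&app,   raw + 0, 2);		/* cf. *(ap + 0) in the driver */
	memcpy(&guard, raw + 2, 2);		/* cf. *(ap + 2) */
	memcpy(&ref,   raw + 4, 4);		/* cf. *(ap + 4) */
	printf("app=0x%04x guard=0x%04x ref=0x%08x\n", app, guard, ref);
	/* On a little-endian host: app=0xffff guard=0x1234 ref=0x00000010 */
	return 0;
}
```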
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index d2e904bc21c0..c06e5f9b431e 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -102,3 +102,24 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
102 | fcport->d_id.b.al_pa); | 102 | fcport->d_id.b.al_pa); |
103 | } | 103 | } |
104 | } | 104 | } |
105 | |||
106 | static inline int | ||
107 | qla2x00_hba_err_chk_enabled(unsigned char op) | ||
108 | { | ||
109 | switch (op) { | ||
110 | case SCSI_PROT_READ_STRIP: | ||
111 | case SCSI_PROT_WRITE_INSERT: | ||
112 | if (ql2xenablehba_err_chk >= 1) | ||
113 | return 1; | ||
114 | break; | ||
115 | case SCSI_PROT_READ_PASS: | ||
116 | case SCSI_PROT_WRITE_PASS: | ||
117 | if (ql2xenablehba_err_chk >= 2) | ||
118 | return 1; | ||
119 | break; | ||
120 | case SCSI_PROT_READ_INSERT: | ||
121 | case SCSI_PROT_WRITE_STRIP: | ||
122 | return 1; | ||
123 | } | ||
124 | return 0; | ||
125 | } | ||
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 49d6906af886..09ad3ce60064 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -717,12 +717,17 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
717 | unsigned char op = scsi_get_prot_op(cmd); | 717 | unsigned char op = scsi_get_prot_op(cmd); |
718 | 718 | ||
719 | switch (scsi_get_prot_type(cmd)) { | 719 | switch (scsi_get_prot_type(cmd)) { |
720 | /* For TYPE 0 protection: no checking */ | ||
721 | case SCSI_PROT_DIF_TYPE0: | 720 | case SCSI_PROT_DIF_TYPE0: |
722 | pkt->ref_tag_mask[0] = 0x00; | 721 | /* |
723 | pkt->ref_tag_mask[1] = 0x00; | 722 | * No check for ql2xenablehba_err_chk, as it would be an |
724 | pkt->ref_tag_mask[2] = 0x00; | 723 | * I/O error if hba tag generation is not done. |
725 | pkt->ref_tag_mask[3] = 0x00; | 724 | */ |
725 | pkt->ref_tag = cpu_to_le32((uint32_t) | ||
726 | (0xffffffff & scsi_get_lba(cmd))); | ||
727 | pkt->ref_tag_mask[0] = 0xff; | ||
728 | pkt->ref_tag_mask[1] = 0xff; | ||
729 | pkt->ref_tag_mask[2] = 0xff; | ||
730 | pkt->ref_tag_mask[3] = 0xff; | ||
726 | break; | 731 | break; |
727 | 732 | ||
728 | /* | 733 | /* |
@@ -730,7 +735,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
730 | * match LBA in CDB + N | 735 | * match LBA in CDB + N |
731 | */ | 736 | */ |
732 | case SCSI_PROT_DIF_TYPE2: | 737 | case SCSI_PROT_DIF_TYPE2: |
733 | if (!ql2xenablehba_err_chk) | 738 | if (!qla2x00_hba_err_chk_enabled(op)) |
734 | break; | 739 | break; |
735 | 740 | ||
736 | if (scsi_prot_sg_count(cmd)) { | 741 | if (scsi_prot_sg_count(cmd)) { |
@@ -763,7 +768,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
763 | * 16 bit app tag. | 768 | * 16 bit app tag. |
764 | */ | 769 | */ |
765 | case SCSI_PROT_DIF_TYPE1: | 770 | case SCSI_PROT_DIF_TYPE1: |
766 | if (!ql2xenablehba_err_chk) | 771 | if (!qla2x00_hba_err_chk_enabled(op)) |
767 | break; | 772 | break; |
768 | 773 | ||
769 | if (protcnt && (op == SCSI_PROT_WRITE_STRIP || | 774 | if (protcnt && (op == SCSI_PROT_WRITE_STRIP || |
@@ -798,8 +803,162 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
798 | scsi_get_prot_type(cmd), cmd); | 803 | scsi_get_prot_type(cmd), cmd); |
799 | } | 804 | } |
800 | 805 | ||
806 | struct qla2_sgx { | ||
807 | dma_addr_t dma_addr; /* OUT */ | ||
808 | uint32_t dma_len; /* OUT */ | ||
809 | |||
810 | uint32_t tot_bytes; /* IN */ | ||
811 | struct scatterlist *cur_sg; /* IN */ | ||
812 | |||
813 | /* for book keeping, bzero on initial invocation */ | ||
814 | uint32_t bytes_consumed; | ||
815 | uint32_t num_bytes; | ||
816 | uint32_t tot_partial; | ||
817 | |||
818 | /* for debugging */ | ||
819 | uint32_t num_sg; | ||
820 | srb_t *sp; | ||
821 | }; | ||
822 | |||
823 | static int | ||
824 | qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, | ||
825 | uint32_t *partial) | ||
826 | { | ||
827 | struct scatterlist *sg; | ||
828 | uint32_t cumulative_partial, sg_len; | ||
829 | dma_addr_t sg_dma_addr; | ||
830 | |||
831 | if (sgx->num_bytes == sgx->tot_bytes) | ||
832 | return 0; | ||
833 | |||
834 | sg = sgx->cur_sg; | ||
835 | cumulative_partial = sgx->tot_partial; | ||
836 | |||
837 | sg_dma_addr = sg_dma_address(sg); | ||
838 | sg_len = sg_dma_len(sg); | ||
839 | |||
840 | sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed; | ||
841 | |||
842 | if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) { | ||
843 | sgx->dma_len = (blk_sz - cumulative_partial); | ||
844 | sgx->tot_partial = 0; | ||
845 | sgx->num_bytes += blk_sz; | ||
846 | *partial = 0; | ||
847 | } else { | ||
848 | sgx->dma_len = sg_len - sgx->bytes_consumed; | ||
849 | sgx->tot_partial += sgx->dma_len; | ||
850 | *partial = 1; | ||
851 | } | ||
852 | |||
853 | sgx->bytes_consumed += sgx->dma_len; | ||
854 | |||
855 | if (sg_len == sgx->bytes_consumed) { | ||
856 | sg = sg_next(sg); | ||
857 | sgx->num_sg++; | ||
858 | sgx->cur_sg = sg; | ||
859 | sgx->bytes_consumed = 0; | ||
860 | } | ||
861 | |||
862 | return 1; | ||
863 | } | ||
801 | 864 | ||
802 | static int | 865 | static int |
866 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | ||
867 | uint32_t *dsd, uint16_t tot_dsds) | ||
868 | { | ||
869 | void *next_dsd; | ||
870 | uint8_t avail_dsds = 0; | ||
871 | uint32_t dsd_list_len; | ||
872 | struct dsd_dma *dsd_ptr; | ||
873 | struct scatterlist *sg_prot; | ||
874 | uint32_t *cur_dsd = dsd; | ||
875 | uint16_t used_dsds = tot_dsds; | ||
876 | |||
877 | uint32_t prot_int; | ||
878 | uint32_t partial; | ||
879 | struct qla2_sgx sgx; | ||
880 | dma_addr_t sle_dma; | ||
881 | uint32_t sle_dma_len, tot_prot_dma_len = 0; | ||
882 | struct scsi_cmnd *cmd = sp->cmd; | ||
883 | |||
884 | prot_int = cmd->device->sector_size; | ||
885 | |||
886 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | ||
887 | sgx.tot_bytes = scsi_bufflen(sp->cmd); | ||
888 | sgx.cur_sg = scsi_sglist(sp->cmd); | ||
889 | sgx.sp = sp; | ||
890 | |||
891 | sg_prot = scsi_prot_sglist(sp->cmd); | ||
892 | |||
893 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { | ||
894 | |||
895 | sle_dma = sgx.dma_addr; | ||
896 | sle_dma_len = sgx.dma_len; | ||
897 | alloc_and_fill: | ||
898 | /* Allocate additional continuation packets? */ | ||
899 | if (avail_dsds == 0) { | ||
900 | avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? | ||
901 | QLA_DSDS_PER_IOCB : used_dsds; | ||
902 | dsd_list_len = (avail_dsds + 1) * 12; | ||
903 | used_dsds -= avail_dsds; | ||
904 | |||
905 | /* allocate tracking DS */ | ||
906 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | ||
907 | if (!dsd_ptr) | ||
908 | return 1; | ||
909 | |||
910 | /* allocate new list */ | ||
911 | dsd_ptr->dsd_addr = next_dsd = | ||
912 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, | ||
913 | &dsd_ptr->dsd_list_dma); | ||
914 | |||
915 | if (!next_dsd) { | ||
916 | /* | ||
917 | * Need to cleanup only this dsd_ptr, rest | ||
918 | * will be done by sp_free_dma() | ||
919 | */ | ||
920 | kfree(dsd_ptr); | ||
921 | return 1; | ||
922 | } | ||
923 | |||
924 | list_add_tail(&dsd_ptr->list, | ||
925 | &((struct crc_context *)sp->ctx)->dsd_list); | ||
926 | |||
927 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
928 | |||
929 | /* add new list to cmd iocb or last list */ | ||
930 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
931 | *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
932 | *cur_dsd++ = dsd_list_len; | ||
933 | cur_dsd = (uint32_t *)next_dsd; | ||
934 | } | ||
935 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
936 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
937 | *cur_dsd++ = cpu_to_le32(sle_dma_len); | ||
938 | avail_dsds--; | ||
939 | |||
940 | if (partial == 0) { | ||
941 | /* Got a full protection interval */ | ||
942 | sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len; | ||
943 | sle_dma_len = 8; | ||
944 | |||
945 | tot_prot_dma_len += sle_dma_len; | ||
946 | if (tot_prot_dma_len == sg_dma_len(sg_prot)) { | ||
947 | tot_prot_dma_len = 0; | ||
948 | sg_prot = sg_next(sg_prot); | ||
949 | } | ||
950 | |||
951 | partial = 1; /* So as to not re-enter this block */ | ||
952 | goto alloc_and_fill; | ||
953 | } | ||
954 | } | ||
955 | /* Null termination */ | ||
956 | *cur_dsd++ = 0; | ||
957 | *cur_dsd++ = 0; | ||
958 | *cur_dsd++ = 0; | ||
959 | return 0; | ||
960 | } | ||
961 | static int | ||
803 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | 962 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
804 | uint16_t tot_dsds) | 963 | uint16_t tot_dsds) |
805 | { | 964 | { |
@@ -981,7 +1140,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
981 | struct scsi_cmnd *cmd; | 1140 | struct scsi_cmnd *cmd; |
982 | struct scatterlist *cur_seg; | 1141 | struct scatterlist *cur_seg; |
983 | int sgc; | 1142 | int sgc; |
984 | uint32_t total_bytes; | 1143 | uint32_t total_bytes = 0; |
985 | uint32_t data_bytes; | 1144 | uint32_t data_bytes; |
986 | uint32_t dif_bytes; | 1145 | uint32_t dif_bytes; |
987 | uint8_t bundling = 1; | 1146 | uint8_t bundling = 1; |
@@ -1023,8 +1182,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1023 | __constant_cpu_to_le16(CF_READ_DATA); | 1182 | __constant_cpu_to_le16(CF_READ_DATA); |
1024 | } | 1183 | } |
1025 | 1184 | ||
1026 | tot_prot_dsds = scsi_prot_sg_count(cmd); | 1185 | if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || |
1027 | if (!tot_prot_dsds) | 1186 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || |
1187 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || | ||
1188 | (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) | ||
1028 | bundling = 0; | 1189 | bundling = 0; |
1029 | 1190 | ||
1030 | /* Allocate CRC context from global pool */ | 1191 | /* Allocate CRC context from global pool */ |
@@ -1107,15 +1268,28 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1107 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ | 1268 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ |
1108 | 1269 | ||
1109 | /* Compute dif len and adjust data len to incude protection */ | 1270 | /* Compute dif len and adjust data len to incude protection */ |
1110 | total_bytes = data_bytes; | ||
1111 | dif_bytes = 0; | 1271 | dif_bytes = 0; |
1112 | blk_size = cmd->device->sector_size; | 1272 | blk_size = cmd->device->sector_size; |
1113 | if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { | 1273 | dif_bytes = (data_bytes / blk_size) * 8; |
1114 | dif_bytes = (data_bytes / blk_size) * 8; | 1274 | |
1115 | total_bytes += dif_bytes; | 1275 | switch (scsi_get_prot_op(sp->cmd)) { |
1276 | case SCSI_PROT_READ_INSERT: | ||
1277 | case SCSI_PROT_WRITE_STRIP: | ||
1278 | total_bytes = data_bytes; | ||
1279 | data_bytes += dif_bytes; | ||
1280 | break; | ||
1281 | |||
1282 | case SCSI_PROT_READ_STRIP: | ||
1283 | case SCSI_PROT_WRITE_INSERT: | ||
1284 | case SCSI_PROT_READ_PASS: | ||
1285 | case SCSI_PROT_WRITE_PASS: | ||
1286 | total_bytes = data_bytes + dif_bytes; | ||
1287 | break; | ||
1288 | default: | ||
1289 | BUG(); | ||
1116 | } | 1290 | } |
1117 | 1291 | ||
1118 | if (!ql2xenablehba_err_chk) | 1292 | if (!qla2x00_hba_err_chk_enabled(scsi_get_prot_op(cmd))) |
1119 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | 1293 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ |
1120 | 1294 | ||
1121 | if (!bundling) { | 1295 | if (!bundling) { |
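To make the new accounting in this hunk concrete (illustrative numbers, not from the patch): for a 64 KiB transfer on a 512-byte-sector device, dif_bytes = (65536 / 512) * 8 = 1024. In the SCSI_PROT_READ_INSERT / SCSI_PROT_WRITE_STRIP cases total_bytes keeps the original 65536 while data_bytes grows to 66560; in the PASS and READ_STRIP / WRITE_INSERT cases total_bytes becomes 65536 + 1024 = 66560.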
@@ -1151,7 +1325,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1151 | 1325 | ||
1152 | cmd_pkt->control_flags |= | 1326 | cmd_pkt->control_flags |= |
1153 | __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); | 1327 | __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); |
1154 | if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | 1328 | |
1329 | if (!bundling && tot_prot_dsds) { | ||
1330 | if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, | ||
1331 | cur_dsd, tot_dsds)) | ||
1332 | goto crc_queuing_error; | ||
1333 | } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | ||
1155 | (tot_dsds - tot_prot_dsds))) | 1334 | (tot_dsds - tot_prot_dsds))) |
1156 | goto crc_queuing_error; | 1335 | goto crc_queuing_error; |
1157 | 1336 | ||
@@ -1414,6 +1593,22 @@ qla24xx_dif_start_scsi(srb_t *sp)
1414 | goto queuing_error; | 1593 | goto queuing_error; |
1415 | else | 1594 | else |
1416 | sp->flags |= SRB_DMA_VALID; | 1595 | sp->flags |= SRB_DMA_VALID; |
1596 | |||
1597 | if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || | ||
1598 | (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { | ||
1599 | struct qla2_sgx sgx; | ||
1600 | uint32_t partial; | ||
1601 | |||
1602 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | ||
1603 | sgx.tot_bytes = scsi_bufflen(cmd); | ||
1604 | sgx.cur_sg = scsi_sglist(cmd); | ||
1605 | sgx.sp = sp; | ||
1606 | |||
1607 | nseg = 0; | ||
1608 | while (qla24xx_get_one_block_sg( | ||
1609 | cmd->device->sector_size, &sgx, &partial)) | ||
1610 | nseg++; | ||
1611 | } | ||
1417 | } else | 1612 | } else |
1418 | nseg = 0; | 1613 | nseg = 0; |
1419 | 1614 | ||
@@ -1428,6 +1623,11 @@ qla24xx_dif_start_scsi(srb_t *sp)
1428 | goto queuing_error; | 1623 | goto queuing_error; |
1429 | else | 1624 | else |
1430 | sp->flags |= SRB_CRC_PROT_DMA_VALID; | 1625 | sp->flags |= SRB_CRC_PROT_DMA_VALID; |
1626 | |||
1627 | if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || | ||
1628 | (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) { | ||
1629 | nseg = scsi_bufflen(cmd) / cmd->device->sector_size; | ||
1630 | } | ||
1431 | } else { | 1631 | } else { |
1432 | nseg = 0; | 1632 | nseg = 0; |
1433 | } | 1633 | } |
@@ -1454,6 +1654,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1454 | /* Build header part of command packet (excluding the OPCODE). */ | 1654 | /* Build header part of command packet (excluding the OPCODE). */ |
1455 | req->current_outstanding_cmd = handle; | 1655 | req->current_outstanding_cmd = handle; |
1456 | req->outstanding_cmds[handle] = sp; | 1656 | req->outstanding_cmds[handle] = sp; |
1657 | sp->handle = handle; | ||
1457 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 1658 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; |
1458 | req->cnt -= req_cnt; | 1659 | req->cnt -= req_cnt; |
1459 | 1660 | ||
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index b16b7725dee0..53339f10a598 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1435,25 +1435,27 @@ struct scsi_dif_tuple {
1435 | * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST | 1435 | * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST |
1436 | * to indicate to the kernel that the HBA detected error. | 1436 | * to indicate to the kernel that the HBA detected error. |
1437 | */ | 1437 | */ |
1438 | static inline void | 1438 | static inline int |
1439 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | 1439 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) |
1440 | { | 1440 | { |
1441 | struct scsi_qla_host *vha = sp->fcport->vha; | 1441 | struct scsi_qla_host *vha = sp->fcport->vha; |
1442 | struct scsi_cmnd *cmd = sp->cmd; | 1442 | struct scsi_cmnd *cmd = sp->cmd; |
1443 | struct scsi_dif_tuple *ep = | 1443 | uint8_t *ap = &sts24->data[12]; |
1444 | (struct scsi_dif_tuple *)&sts24->data[20]; | 1444 | uint8_t *ep = &sts24->data[20]; |
1445 | struct scsi_dif_tuple *ap = | ||
1446 | (struct scsi_dif_tuple *)&sts24->data[12]; | ||
1447 | uint32_t e_ref_tag, a_ref_tag; | 1445 | uint32_t e_ref_tag, a_ref_tag; |
1448 | uint16_t e_app_tag, a_app_tag; | 1446 | uint16_t e_app_tag, a_app_tag; |
1449 | uint16_t e_guard, a_guard; | 1447 | uint16_t e_guard, a_guard; |
1450 | 1448 | ||
1451 | e_ref_tag = be32_to_cpu(ep->ref_tag); | 1449 | /* |
1452 | a_ref_tag = be32_to_cpu(ap->ref_tag); | 1450 | * swab32 of the "data" field in the beginning of qla2x00_status_entry() |
1453 | e_app_tag = be16_to_cpu(ep->app_tag); | 1451 | * would make guard field appear at offset 2 |
1454 | a_app_tag = be16_to_cpu(ap->app_tag); | 1452 | */ |
1455 | e_guard = be16_to_cpu(ep->guard); | 1453 | a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); |
1456 | a_guard = be16_to_cpu(ap->guard); | 1454 | a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); |
1455 | a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); | ||
1456 | e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); | ||
1457 | e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); | ||
1458 | e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); | ||
1457 | 1459 | ||
1458 | ql_dbg(ql_dbg_io, vha, 0x3023, | 1460 | ql_dbg(ql_dbg_io, vha, 0x3023, |
1459 | "iocb(s) %p Returned STATUS.\n", sts24); | 1461 | "iocb(s) %p Returned STATUS.\n", sts24); |
@@ -1465,6 +1467,63 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1465 | cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, | 1467 | cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, |
1466 | a_app_tag, e_app_tag, a_guard, e_guard); | 1468 | a_app_tag, e_app_tag, a_guard, e_guard); |
1467 | 1469 | ||
1470 | /* | ||
1471 | * Ignore sector if: | ||
1472 | * For type 3: ref & app tag is all 'f's | ||
1473 | * For type 0,1,2: app tag is all 'f's | ||
1474 | */ | ||
1475 | if ((a_app_tag == 0xffff) && | ||
1476 | ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || | ||
1477 | (a_ref_tag == 0xffffffff))) { | ||
1478 | uint32_t blocks_done, resid; | ||
1479 | sector_t lba_s = scsi_get_lba(cmd); | ||
1480 | |||
1481 | /* 2TB boundary case covered automatically with this */ | ||
1482 | blocks_done = e_ref_tag - (uint32_t)lba_s + 1; | ||
1483 | |||
1484 | resid = scsi_bufflen(cmd) - (blocks_done * | ||
1485 | cmd->device->sector_size); | ||
1486 | |||
1487 | scsi_set_resid(cmd, resid); | ||
1488 | cmd->result = DID_OK << 16; | ||
1489 | |||
1490 | /* Update protection tag */ | ||
1491 | if (scsi_prot_sg_count(cmd)) { | ||
1492 | uint32_t i, j = 0, k = 0, num_ent; | ||
1493 | struct scatterlist *sg; | ||
1494 | struct sd_dif_tuple *spt; | ||
1495 | |||
1496 | /* Patch the corresponding protection tags */ | ||
1497 | scsi_for_each_prot_sg(cmd, sg, | ||
1498 | scsi_prot_sg_count(cmd), i) { | ||
1499 | num_ent = sg_dma_len(sg) / 8; | ||
1500 | if (k + num_ent < blocks_done) { | ||
1501 | k += num_ent; | ||
1502 | continue; | ||
1503 | } | ||
1504 | j = blocks_done - k - 1; | ||
1505 | k = blocks_done; | ||
1506 | break; | ||
1507 | } | ||
1508 | |||
1509 | if (k != blocks_done) { | ||
1510 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, | ||
1511 | "unexpected tag values tag:lba=%x:%lx)\n", | ||
1512 | e_ref_tag, lba_s); | ||
1513 | return 1; | ||
1514 | } | ||
1515 | |||
1516 | spt = page_address(sg_page(sg)) + sg->offset; | ||
1517 | spt += j; | ||
1518 | |||
1519 | spt->app_tag = 0xffff; | ||
1520 | if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) | ||
1521 | spt->ref_tag = 0xffffffff; | ||
1522 | } | ||
1523 | |||
1524 | return 0; | ||
1525 | } | ||
1526 | |||
1468 | /* check guard */ | 1527 | /* check guard */ |
1469 | if (e_guard != a_guard) { | 1528 | if (e_guard != a_guard) { |
1470 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 1529 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
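As a worked example of the runt-block handling above (illustrative values): a 16-block read starting at LBA 1000 on a 512-byte-sector device that reports an expected reference tag of 1007 gives blocks_done = 1007 - 1000 + 1 = 8 and resid = 16 * 512 - 8 * 512 = 4096, so the command completes as a short but successful transfer of the first eight blocks, with the protection tuple for the reported block patched to the "ignore" values.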
@@ -1472,7 +1531,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1472 | set_driver_byte(cmd, DRIVER_SENSE); | 1531 | set_driver_byte(cmd, DRIVER_SENSE); |
1473 | set_host_byte(cmd, DID_ABORT); | 1532 | set_host_byte(cmd, DID_ABORT); |
1474 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | 1533 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; |
1475 | return; | 1534 | return 1; |
1476 | } | 1535 | } |
1477 | 1536 | ||
1478 | /* check appl tag */ | 1537 | /* check appl tag */ |
@@ -1482,7 +1541,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1482 | set_driver_byte(cmd, DRIVER_SENSE); | 1541 | set_driver_byte(cmd, DRIVER_SENSE); |
1483 | set_host_byte(cmd, DID_ABORT); | 1542 | set_host_byte(cmd, DID_ABORT); |
1484 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | 1543 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; |
1485 | return; | 1544 | return 1; |
1486 | } | 1545 | } |
1487 | 1546 | ||
1488 | /* check ref tag */ | 1547 | /* check ref tag */ |
@@ -1492,8 +1551,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1492 | set_driver_byte(cmd, DRIVER_SENSE); | 1551 | set_driver_byte(cmd, DRIVER_SENSE); |
1493 | set_host_byte(cmd, DID_ABORT); | 1552 | set_host_byte(cmd, DID_ABORT); |
1494 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | 1553 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; |
1495 | return; | 1554 | return 1; |
1496 | } | 1555 | } |
1556 | return 1; | ||
1497 | } | 1557 | } |
1498 | 1558 | ||
1499 | /** | 1559 | /** |
@@ -1767,7 +1827,7 @@ check_scsi_status:
1767 | break; | 1827 | break; |
1768 | 1828 | ||
1769 | case CS_DIF_ERROR: | 1829 | case CS_DIF_ERROR: |
1770 | qla2x00_handle_dif_error(sp, sts24); | 1830 | logit = qla2x00_handle_dif_error(sp, sts24); |
1771 | break; | 1831 | break; |
1772 | default: | 1832 | default: |
1773 | cp->result = DID_ERROR << 16; | 1833 | cp->result = DID_ERROR << 16; |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e02df276804e..d65a3005b439 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -106,17 +106,21 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
106 | "Maximum queue depth to report for target devices."); | 106 | "Maximum queue depth to report for target devices."); |
107 | 107 | ||
108 | /* Do not change the value of this after module load */ | 108 | /* Do not change the value of this after module load */ |
109 | int ql2xenabledif = 1; | 109 | int ql2xenabledif = 0; |
110 | module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); | 110 | module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); |
111 | MODULE_PARM_DESC(ql2xenabledif, | 111 | MODULE_PARM_DESC(ql2xenabledif, |
112 | " Enable T10-CRC-DIF " | 112 | " Enable T10-CRC-DIF " |
113 | " Default is 0 - No DIF Support. 1 - Enable it"); | 113 | " Default is 0 - No DIF Support. 1 - Enable it" |
114 | ", 2 - Enable DIF for all types, except Type 0."); | ||
114 | 115 | ||
115 | int ql2xenablehba_err_chk; | 116 | int ql2xenablehba_err_chk = 2; |
116 | module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); | 117 | module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); |
117 | MODULE_PARM_DESC(ql2xenablehba_err_chk, | 118 | MODULE_PARM_DESC(ql2xenablehba_err_chk, |
118 | " Enable T10-CRC-DIF Error isolation by HBA" | 119 | " Enable T10-CRC-DIF Error isolation by HBA:\n" |
119 | " Default is 0 - Error isolation disabled, 1 - Enable it"); | 120 | " Default is 1.\n" |
121 | " 0 -- Error isolation disabled\n" | ||
122 | " 1 -- Error isolation enabled only for DIX Type 0\n" | ||
123 | " 2 -- Error isolation enabled for all Types\n"); | ||
120 | 124 | ||
121 | int ql2xiidmaenable=1; | 125 | int ql2xiidmaenable=1; |
122 | module_param(ql2xiidmaenable, int, S_IRUGO); | 126 | module_param(ql2xiidmaenable, int, S_IRUGO); |
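For example (illustrative, not part of the patch): loading the driver with ql2xenabledif=2 and ql2xenablehba_err_chk=2 registers DIF/DIX for all types except Type 0 and enables HBA error isolation for every protection operation, whereas with the new default of ql2xenabledif=0 DIF support stays off until explicitly requested.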
@@ -2380,11 +2384,14 @@ skip_dpc:
2380 | 2384 | ||
2381 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { | 2385 | if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) { |
2382 | if (ha->fw_attributes & BIT_4) { | 2386 | if (ha->fw_attributes & BIT_4) { |
2387 | int prot = 0; | ||
2383 | base_vha->flags.difdix_supported = 1; | 2388 | base_vha->flags.difdix_supported = 1; |
2384 | ql_dbg(ql_dbg_init, base_vha, 0x00f1, | 2389 | ql_dbg(ql_dbg_init, base_vha, 0x00f1, |
2385 | "Registering for DIF/DIX type 1 and 3 protection.\n"); | 2390 | "Registering for DIF/DIX type 1 and 3 protection.\n"); |
2391 | if (ql2xenabledif == 1) | ||
2392 | prot = SHOST_DIX_TYPE0_PROTECTION; | ||
2386 | scsi_host_set_prot(host, | 2393 | scsi_host_set_prot(host, |
2387 | SHOST_DIF_TYPE1_PROTECTION | 2394 | prot | SHOST_DIF_TYPE1_PROTECTION |
2388 | | SHOST_DIF_TYPE2_PROTECTION | 2395 | | SHOST_DIF_TYPE2_PROTECTION |
2389 | | SHOST_DIF_TYPE3_PROTECTION | 2396 | | SHOST_DIF_TYPE3_PROTECTION |
2390 | | SHOST_DIX_TYPE1_PROTECTION | 2397 | | SHOST_DIX_TYPE1_PROTECTION |