-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c     |   2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h     |  14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h     |   7
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h  |  13
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c    | 134
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c     |   9
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c      |   2
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  | 621
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  |  84
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c |  41
10 files changed, 851 insertions(+), 76 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cdff3aa380cb..781ca5b09362 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -66,7 +66,7 @@ | |||
66 | * | | | 0xd030-0xd0ff | | 66 | * | | | 0xd030-0xd0ff | |
67 | * | | | 0xd101-0xd1fe | | 67 | * | | | 0xd101-0xd1fe | |
68 | * | | | 0xd213-0xd2fe | | 68 | * | | | 0xd213-0xd2fe | |
69 | * | Target Mode | 0xe070 | 0xe021 | | 69 | * | Target Mode | 0xe078 | | |
70 | * | Target Mode Management | 0xf072 | 0xf002-0xf003 | | 70 | * | Target Mode Management | 0xf072 | 0xf002-0xf003 | |
71 | * | | | 0xf046-0xf049 | | 71 | * | | | 0xf046-0xf049 | |
72 | * | Target Mode Task Management | 0x1000b | | | 72 | * | Target Mode Task Management | 0x1000b | | |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5c590d40e676..1fa010448666 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1629,10 +1629,20 @@ typedef struct { | |||
1629 | #define PO_MODE_DIF_PASS 2 | 1629 | #define PO_MODE_DIF_PASS 2 |
1630 | #define PO_MODE_DIF_REPLACE 3 | 1630 | #define PO_MODE_DIF_REPLACE 3 |
1631 | #define PO_MODE_DIF_TCP_CKSUM 6 | 1631 | #define PO_MODE_DIF_TCP_CKSUM 6 |
1632 | #define PO_ENABLE_DIF_BUNDLING BIT_8 | ||
1633 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 | 1632 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 |
1634 | #define PO_DISABLE_INCR_REF_TAG BIT_5 | ||
1635 | #define PO_DISABLE_GUARD_CHECK BIT_4 | 1633 | #define PO_DISABLE_GUARD_CHECK BIT_4 |
1634 | #define PO_DISABLE_INCR_REF_TAG BIT_5 | ||
1635 | #define PO_DIS_HEADER_MODE BIT_7 | ||
1636 | #define PO_ENABLE_DIF_BUNDLING BIT_8 | ||
1637 | #define PO_DIS_FRAME_MODE BIT_9 | ||
1638 | #define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */ | ||
1639 | #define PO_DIS_VALD_APP_REF_ESC BIT_11 | ||
1640 | |||
1641 | #define PO_DIS_APP_TAG_REPL BIT_12 /* disable App Tag replacement */ | ||
1642 | #define PO_DIS_REF_TAG_REPL BIT_13 | ||
1643 | #define PO_DIS_APP_TAG_VALD BIT_14 /* disable App Tag validation */ | ||
1644 | #define PO_DIS_REF_TAG_VALD BIT_15 | ||
1645 | |||
1636 | /* | 1646 | /* |
1637 | * ISP queue - 64-Bit addressing, continuation crc entry structure definition. | 1647 | * ISP queue - 64-Bit addressing, continuation crc entry structure definition. |
1638 | */ | 1648 | */ |
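
The protection-option bits defined above are consumed through the prot_opts word of the firmware CRC context. A minimal standalone sketch of how the target-mode builder added later in this patch (qlt_build_ctio_crc2_pkt) composes that word for a DIF PASS operation with HBA error checking disabled; the numeric values are copied from the defines in this hunk and the existing PO_MODE_* defines, everything else is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define PO_MODE_DIF_PASS          2          /* from qla_def.h */
#define PO_DISABLE_GUARD_CHECK    (1u << 4)  /* BIT_4, the 0x10 used in the patch */
#define PO_ENABLE_DIF_BUNDLING    (1u << 8)  /* BIT_8 */

int main(void)
{
	uint16_t fw_prot_opts = 0;
	int hba_err_chk_enabled = 0;  /* as if qlt_hba_err_chk_enabled() returned 0 */
	int bundling = 1;             /* separate data and protection DSD lists */

	if (!hba_err_chk_enabled)
		fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
	fw_prot_opts |= PO_MODE_DIF_PASS;        /* DIF mode lives in the low bits */
	if (bundling)
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;

	printf("prot_opts = 0x%04x\n", fw_prot_opts);   /* prints 0x0112 */
	return 0;
}
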
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index a96efff4146a..d48dea8fab1b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); | |||
220 | 220 | ||
221 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); | 221 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); |
222 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); | 222 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); |
223 | extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, | ||
224 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | ||
225 | extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, | ||
226 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | ||
227 | extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, | ||
228 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | ||
229 | |||
223 | 230 | ||
224 | /* | 231 | /* |
225 | * Global Function Prototypes in qla_mbx.c source file. | 232 | * Global Function Prototypes in qla_mbx.c source file. |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index be5b20446e5c..b3b1d6fc2d6c 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -5,6 +5,7 @@ | |||
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "qla_target.h" | ||
8 | /** | 9 | /** |
9 | * qla24xx_calc_iocbs() - Determine number of Command Type 3 and | 10 | * qla24xx_calc_iocbs() - Determine number of Command Type 3 and |
10 | * Continuation Type 1 IOCBs to allocate. | 11 | * Continuation Type 1 IOCBs to allocate. |
@@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) { | |||
128 | } | 129 | } |
129 | 130 | ||
130 | static inline void | 131 | static inline void |
131 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) | 132 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, |
133 | struct qla_tgt_cmd *tc) | ||
132 | { | 134 | { |
133 | struct dsd_dma *dsd_ptr, *tdsd_ptr; | 135 | struct dsd_dma *dsd_ptr, *tdsd_ptr; |
134 | struct crc_context *ctx; | 136 | struct crc_context *ctx; |
135 | 137 | ||
136 | ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); | 138 | if (sp) |
139 | ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); | ||
140 | else if (tc) | ||
141 | ctx = (struct crc_context *)tc->ctx; | ||
142 | else { | ||
143 | BUG(); | ||
144 | return; | ||
145 | } | ||
137 | 146 | ||
138 | /* clean up allocated prev pool */ | 147 | /* clean up allocated prev pool */ |
139 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, | 148 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, |
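
With the new qla_tgt_cmd parameter the helper can be driven from either the initiator path (via the srb) or the target path added by this patch. A rough sketch of the complete function after the change, assuming the DSD release loop that begins above is otherwise untouched:

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	if (sp)
		ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
	else if (tc)
		ctx = (struct crc_context *)tc->ctx;
	else {
		BUG();
		return;
	}

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}
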
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index af83132141f7..760931529592 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, | |||
936 | return 1; | 936 | return 1; |
937 | } | 937 | } |
938 | 938 | ||
939 | static int | 939 | int |
940 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | 940 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, |
941 | uint32_t *dsd, uint16_t tot_dsds) | 941 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
942 | { | 942 | { |
943 | void *next_dsd; | 943 | void *next_dsd; |
944 | uint8_t avail_dsds = 0; | 944 | uint8_t avail_dsds = 0; |
@@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | |||
948 | uint32_t *cur_dsd = dsd; | 948 | uint32_t *cur_dsd = dsd; |
949 | uint16_t used_dsds = tot_dsds; | 949 | uint16_t used_dsds = tot_dsds; |
950 | 950 | ||
951 | uint32_t prot_int; | 951 | uint32_t prot_int; /* protection interval */ |
952 | uint32_t partial; | 952 | uint32_t partial; |
953 | struct qla2_sgx sgx; | 953 | struct qla2_sgx sgx; |
954 | dma_addr_t sle_dma; | 954 | dma_addr_t sle_dma; |
955 | uint32_t sle_dma_len, tot_prot_dma_len = 0; | 955 | uint32_t sle_dma_len, tot_prot_dma_len = 0; |
956 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 956 | struct scsi_cmnd *cmd; |
957 | 957 | struct scsi_qla_host *vha; | |
958 | prot_int = cmd->device->sector_size; | ||
959 | 958 | ||
960 | memset(&sgx, 0, sizeof(struct qla2_sgx)); | 959 | memset(&sgx, 0, sizeof(struct qla2_sgx)); |
961 | sgx.tot_bytes = scsi_bufflen(cmd); | 960 | if (sp) { |
962 | sgx.cur_sg = scsi_sglist(cmd); | 961 | vha = sp->fcport->vha; |
963 | sgx.sp = sp; | 962 | cmd = GET_CMD_SP(sp); |
964 | 963 | prot_int = cmd->device->sector_size; | |
965 | sg_prot = scsi_prot_sglist(cmd); | 964 | |
965 | sgx.tot_bytes = scsi_bufflen(cmd); | ||
966 | sgx.cur_sg = scsi_sglist(cmd); | ||
967 | sgx.sp = sp; | ||
968 | |||
969 | sg_prot = scsi_prot_sglist(cmd); | ||
970 | } else if (tc) { | ||
971 | vha = tc->vha; | ||
972 | prot_int = tc->blk_sz; | ||
973 | sgx.tot_bytes = tc->bufflen; | ||
974 | sgx.cur_sg = tc->sg; | ||
975 | sg_prot = tc->prot_sg; | ||
976 | } else { | ||
977 | BUG(); | ||
978 | return 1; | ||
979 | } | ||
966 | 980 | ||
967 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { | 981 | while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { |
968 | 982 | ||
@@ -995,10 +1009,18 @@ alloc_and_fill: | |||
995 | return 1; | 1009 | return 1; |
996 | } | 1010 | } |
997 | 1011 | ||
998 | list_add_tail(&dsd_ptr->list, | 1012 | if (sp) { |
999 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1013 | list_add_tail(&dsd_ptr->list, |
1014 | &((struct crc_context *) | ||
1015 | sp->u.scmd.ctx)->dsd_list); | ||
1016 | |||
1017 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
1018 | } else { | ||
1019 | list_add_tail(&dsd_ptr->list, | ||
1020 | &(tc->ctx->dsd_list)); | ||
1021 | tc->ctx_dsd_alloced = 1; | ||
1022 | } | ||
1000 | 1023 | ||
1001 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
1002 | 1024 | ||
1003 | /* add new list to cmd iocb or last list */ | 1025 | /* add new list to cmd iocb or last list */ |
1004 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | 1026 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
@@ -1033,21 +1055,35 @@ alloc_and_fill: | |||
1033 | return 0; | 1055 | return 0; |
1034 | } | 1056 | } |
1035 | 1057 | ||
1036 | static int | 1058 | int |
1037 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | 1059 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
1038 | uint16_t tot_dsds) | 1060 | uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
1039 | { | 1061 | { |
1040 | void *next_dsd; | 1062 | void *next_dsd; |
1041 | uint8_t avail_dsds = 0; | 1063 | uint8_t avail_dsds = 0; |
1042 | uint32_t dsd_list_len; | 1064 | uint32_t dsd_list_len; |
1043 | struct dsd_dma *dsd_ptr; | 1065 | struct dsd_dma *dsd_ptr; |
1044 | struct scatterlist *sg; | 1066 | struct scatterlist *sg, *sgl; |
1045 | uint32_t *cur_dsd = dsd; | 1067 | uint32_t *cur_dsd = dsd; |
1046 | int i; | 1068 | int i; |
1047 | uint16_t used_dsds = tot_dsds; | 1069 | uint16_t used_dsds = tot_dsds; |
1048 | struct scsi_cmnd *cmd = GET_CMD_SP(sp); | 1070 | struct scsi_cmnd *cmd; |
1071 | struct scsi_qla_host *vha; | ||
1072 | |||
1073 | if (sp) { | ||
1074 | cmd = GET_CMD_SP(sp); | ||
1075 | sgl = scsi_sglist(cmd); | ||
1076 | vha = sp->fcport->vha; | ||
1077 | } else if (tc) { | ||
1078 | sgl = tc->sg; | ||
1079 | vha = tc->vha; | ||
1080 | } else { | ||
1081 | BUG(); | ||
1082 | return 1; | ||
1083 | } | ||
1049 | 1084 | ||
1050 | scsi_for_each_sg(cmd, sg, tot_dsds, i) { | 1085 | |
1086 | for_each_sg(sgl, sg, tot_dsds, i) { | ||
1051 | dma_addr_t sle_dma; | 1087 | dma_addr_t sle_dma; |
1052 | 1088 | ||
1053 | /* Allocate additional continuation packets? */ | 1089 | /* Allocate additional continuation packets? */ |
@@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
1076 | return 1; | 1112 | return 1; |
1077 | } | 1113 | } |
1078 | 1114 | ||
1079 | list_add_tail(&dsd_ptr->list, | 1115 | if (sp) { |
1080 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1116 | list_add_tail(&dsd_ptr->list, |
1117 | &((struct crc_context *) | ||
1118 | sp->u.scmd.ctx)->dsd_list); | ||
1081 | 1119 | ||
1082 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 1120 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
1121 | } else { | ||
1122 | list_add_tail(&dsd_ptr->list, | ||
1123 | &(tc->ctx->dsd_list)); | ||
1124 | tc->ctx_dsd_alloced = 1; | ||
1125 | } | ||
1083 | 1126 | ||
1084 | /* add new list to cmd iocb or last list */ | 1127 | /* add new list to cmd iocb or last list */ |
1085 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | 1128 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
@@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
1102 | return 0; | 1145 | return 0; |
1103 | } | 1146 | } |
1104 | 1147 | ||
1105 | static int | 1148 | int |
1106 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | 1149 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, |
1107 | uint32_t *dsd, | 1150 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) |
1108 | uint16_t tot_dsds) | ||
1109 | { | 1151 | { |
1110 | void *next_dsd; | 1152 | void *next_dsd; |
1111 | uint8_t avail_dsds = 0; | 1153 | uint8_t avail_dsds = 0; |
1112 | uint32_t dsd_list_len; | 1154 | uint32_t dsd_list_len; |
1113 | struct dsd_dma *dsd_ptr; | 1155 | struct dsd_dma *dsd_ptr; |
1114 | struct scatterlist *sg; | 1156 | struct scatterlist *sg, *sgl; |
1115 | int i; | 1157 | int i; |
1116 | struct scsi_cmnd *cmd; | 1158 | struct scsi_cmnd *cmd; |
1117 | uint32_t *cur_dsd = dsd; | 1159 | uint32_t *cur_dsd = dsd; |
1118 | uint16_t used_dsds = tot_dsds; | 1160 | uint16_t used_dsds = tot_dsds; |
1161 | struct scsi_qla_host *vha; | ||
1162 | |||
1163 | if (sp) { | ||
1164 | cmd = GET_CMD_SP(sp); | ||
1165 | sgl = scsi_prot_sglist(cmd); | ||
1166 | vha = sp->fcport->vha; | ||
1167 | } else if (tc) { | ||
1168 | vha = tc->vha; | ||
1169 | sgl = tc->prot_sg; | ||
1170 | } else { | ||
1171 | BUG(); | ||
1172 | return 1; | ||
1173 | } | ||
1119 | 1174 | ||
1120 | cmd = GET_CMD_SP(sp); | 1175 | ql_dbg(ql_dbg_tgt, vha, 0xe021, |
1121 | scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { | 1176 | "%s: enter\n", __func__); |
1177 | |||
1178 | for_each_sg(sgl, sg, tot_dsds, i) { | ||
1122 | dma_addr_t sle_dma; | 1179 | dma_addr_t sle_dma; |
1123 | 1180 | ||
1124 | /* Allocate additional continuation packets? */ | 1181 | /* Allocate additional continuation packets? */ |
@@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | |||
1147 | return 1; | 1204 | return 1; |
1148 | } | 1205 | } |
1149 | 1206 | ||
1150 | list_add_tail(&dsd_ptr->list, | 1207 | if (sp) { |
1151 | &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); | 1208 | list_add_tail(&dsd_ptr->list, |
1209 | &((struct crc_context *) | ||
1210 | sp->u.scmd.ctx)->dsd_list); | ||
1152 | 1211 | ||
1153 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | 1212 | sp->flags |= SRB_CRC_CTX_DSD_VALID; |
1213 | } else { | ||
1214 | list_add_tail(&dsd_ptr->list, | ||
1215 | &(tc->ctx->dsd_list)); | ||
1216 | tc->ctx_dsd_alloced = 1; | ||
1217 | } | ||
1154 | 1218 | ||
1155 | /* add new list to cmd iocb or last list */ | 1219 | /* add new list to cmd iocb or last list */ |
1156 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | 1220 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); |
@@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1386 | 1450 | ||
1387 | if (!bundling && tot_prot_dsds) { | 1451 | if (!bundling && tot_prot_dsds) { |
1388 | if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, | 1452 | if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, |
1389 | cur_dsd, tot_dsds)) | 1453 | cur_dsd, tot_dsds, NULL)) |
1390 | goto crc_queuing_error; | 1454 | goto crc_queuing_error; |
1391 | } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | 1455 | } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, |
1392 | (tot_dsds - tot_prot_dsds))) | 1456 | (tot_dsds - tot_prot_dsds), NULL)) |
1393 | goto crc_queuing_error; | 1457 | goto crc_queuing_error; |
1394 | 1458 | ||
1395 | if (bundling && tot_prot_dsds) { | 1459 | if (bundling && tot_prot_dsds) { |
@@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | |||
1398 | __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); | 1462 | __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); |
1399 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | 1463 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; |
1400 | if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, | 1464 | if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, |
1401 | tot_prot_dsds)) | 1465 | tot_prot_dsds, NULL)) |
1402 | goto crc_queuing_error; | 1466 | goto crc_queuing_error; |
1403 | } | 1467 | } |
1404 | return QLA_SUCCESS; | 1468 | return QLA_SUCCESS; |
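
The net effect of the qla_iocb.c changes is a single pair of calling conventions for the three DSD walkers: the initiator path passes a valid srb_t and a NULL qla_tgt_cmd, while the target path added later in this patch passes NULL and the target command. The two call sites, taken from the hunk above and from qlt_build_ctio_crc2_pkt in the qla_target.c diff below:

/* Initiator path (qla24xx_build_scsi_crc_2_iocbs): sp valid, tc == NULL */
} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	(tot_dsds - tot_prot_dsds), NULL))
	goto crc_queuing_error;

/* Target path (qlt_build_ctio_crc2_pkt): sp == NULL, tc == cmd */
} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
	(prm->tot_dsds - prm->prot_seg_cnt), cmd))
	goto crc_queuing_error;
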
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 014f8c310b31..a56825c73c31 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2474,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
2474 | if (pkt->entry_status != 0) { | 2474 | if (pkt->entry_status != 0) { |
2475 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); | 2475 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); |
2476 | 2476 | ||
2477 | (void)qlt_24xx_process_response_error(vha, pkt); | 2477 | if (qlt_24xx_process_response_error(vha, pkt)) |
2478 | goto process_err; | ||
2478 | 2479 | ||
2479 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 2480 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
2480 | wmb(); | 2481 | wmb(); |
2481 | continue; | 2482 | continue; |
2482 | } | 2483 | } |
2484 | process_err: | ||
2483 | 2485 | ||
2484 | switch (pkt->entry_type) { | 2486 | switch (pkt->entry_type) { |
2485 | case STATUS_TYPE: | 2487 | case STATUS_TYPE: |
@@ -2496,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
2496 | qla24xx_logio_entry(vha, rsp->req, | 2498 | qla24xx_logio_entry(vha, rsp->req, |
2497 | (struct logio_entry_24xx *)pkt); | 2499 | (struct logio_entry_24xx *)pkt); |
2498 | break; | 2500 | break; |
2499 | case CT_IOCB_TYPE: | 2501 | case CT_IOCB_TYPE: |
2500 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); | 2502 | qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); |
2501 | break; | 2503 | break; |
2502 | case ELS_IOCB_TYPE: | 2504 | case ELS_IOCB_TYPE: |
2503 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); | 2505 | qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); |
2504 | break; | 2506 | break; |
2505 | case ABTS_RECV_24XX: | 2507 | case ABTS_RECV_24XX: |
@@ -2508,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
2508 | case ABTS_RESP_24XX: | 2510 | case ABTS_RESP_24XX: |
2509 | case CTIO_TYPE7: | 2511 | case CTIO_TYPE7: |
2510 | case NOTIFY_ACK_TYPE: | 2512 | case NOTIFY_ACK_TYPE: |
2513 | case CTIO_CRC2: | ||
2511 | qlt_response_pkt_all_vps(vha, (response_t *)pkt); | 2514 | qlt_response_pkt_all_vps(vha, (response_t *)pkt); |
2512 | break; | 2515 | break; |
2513 | case MARKER_TYPE: | 2516 | case MARKER_TYPE: |
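
The new goto matters because, before this change, an entry with a nonzero entry_status was always stamped RESPONSE_PROCESSED, so error-status target-mode IOCBs never reached the target code. qlt_24xx_process_response_error() (extended for CTIO_CRC2 at the end of the qla_target.c diff) decides which entries still need dispatching; an illustrative restatement of that contract, with the numeric entry types taken from the driver's defines:

#include <stdint.h>

/* Sketch only, not the driver function: returns nonzero when the target
 * layer must still see an error-status entry. */
int target_owns_error_entry(uint8_t entry_type)
{
	switch (entry_type) {
	case 0x54:	/* ABTS_RECV_24XX */
	case 0x55:	/* ABTS_RESP_24XX */
	case 0x12:	/* CTIO_TYPE7 */
	case 0x0e:	/* NOTIFY_ACK_TYPE */
	case 0x7a:	/* CTIO_CRC2, added by this patch */
		return 1;
	default:
		return 0;
	}
}
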
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ef6332b61c18..5a70e24d9fe3 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) | |||
616 | 616 | ||
617 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | 617 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { |
618 | /* List assured to be having elements */ | 618 | /* List assured to be having elements */ |
619 | qla2x00_clean_dsd_pool(ha, sp); | 619 | qla2x00_clean_dsd_pool(ha, sp, NULL); |
620 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | 620 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; |
621 | } | 621 | } |
622 | 622 | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0cb73074c199..f24d44cdecc4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -182,6 +182,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, | |||
182 | void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | 182 | void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, |
183 | struct atio_from_isp *atio) | 183 | struct atio_from_isp *atio) |
184 | { | 184 | { |
185 | ql_dbg(ql_dbg_tgt, vha, 0xe072, | ||
186 | "%s: qla_target(%d): type %x ox_id %04x\n", | ||
187 | __func__, vha->vp_idx, atio->u.raw.entry_type, | ||
188 | be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); | ||
189 | |||
185 | switch (atio->u.raw.entry_type) { | 190 | switch (atio->u.raw.entry_type) { |
186 | case ATIO_TYPE7: | 191 | case ATIO_TYPE7: |
187 | { | 192 | { |
@@ -236,6 +241,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | |||
236 | void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) | 241 | void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) |
237 | { | 242 | { |
238 | switch (pkt->entry_type) { | 243 | switch (pkt->entry_type) { |
244 | case CTIO_CRC2: | ||
245 | ql_dbg(ql_dbg_tgt, vha, 0xe073, | ||
246 | "qla_target(%d):%s: CRC2 Response pkt\n", | ||
247 | vha->vp_idx, __func__); | ||
239 | case CTIO_TYPE7: | 248 | case CTIO_TYPE7: |
240 | { | 249 | { |
241 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; | 250 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; |
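
The new CTIO_CRC2 case relies on falling through into the CTIO_TYPE7 handling. Marking that explicitly would keep static checkers quiet; a possible shape, not part of the patch:

	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
		    "qla_target(%d):%s: CRC2 Response pkt\n",
		    vha->vp_idx, __func__);
		/* fall through: CRC_2 completions reuse the CTIO7 handling */
	case CTIO_TYPE7:
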
@@ -1350,13 +1359,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) | |||
1350 | 1359 | ||
1351 | prm->cmd->sg_mapped = 1; | 1360 | prm->cmd->sg_mapped = 1; |
1352 | 1361 | ||
1353 | /* | 1362 | if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { |
1354 | * If greater than four sg entries then we need to allocate | 1363 | /* |
1355 | * the continuation entries | 1364 | * If greater than four sg entries then we need to allocate |
1356 | */ | 1365 | * the continuation entries |
1357 | if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) | 1366 | */ |
1358 | prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - | 1367 | if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) |
1359 | prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); | 1368 | prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - |
1369 | prm->tgt->datasegs_per_cmd, | ||
1370 | prm->tgt->datasegs_per_cont); | ||
1371 | } else { | ||
1372 | /* DIF */ | ||
1373 | if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || | ||
1374 | (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { | ||
1375 | prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); | ||
1376 | prm->tot_dsds = prm->seg_cnt; | ||
1377 | } else | ||
1378 | prm->tot_dsds = prm->seg_cnt; | ||
1379 | |||
1380 | if (cmd->prot_sg_cnt) { | ||
1381 | prm->prot_sg = cmd->prot_sg; | ||
1382 | prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, | ||
1383 | cmd->prot_sg, cmd->prot_sg_cnt, | ||
1384 | cmd->dma_data_direction); | ||
1385 | if (unlikely(prm->prot_seg_cnt == 0)) | ||
1386 | goto out_err; | ||
1387 | |||
1388 | if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || | ||
1389 | (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { | ||
1390 | /* DIF bundling not supported here */ | ||
1391 | prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, | ||
1392 | cmd->blk_sz); | ||
1393 | prm->tot_dsds += prm->prot_seg_cnt; | ||
1394 | } else | ||
1395 | prm->tot_dsds += prm->prot_seg_cnt; | ||
1396 | } | ||
1397 | } | ||
1360 | 1398 | ||
1361 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", | 1399 | ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", |
1362 | prm->seg_cnt, prm->req_cnt); | 1400 | prm->seg_cnt, prm->req_cnt); |
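
A standalone worked example of the DSD accounting above, for a 32 KiB protected command with 512-byte protection intervals; the segment counts are made up and DIV_ROUND_UP is open-coded:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bufflen = 32768, blk_sz = 512;
	uint32_t data_segs = 8;   /* what pci_map_sg() returned for the data SGL */
	uint32_t prot_segs = 2;   /* what pci_map_sg() returned for the PI SGL */
	uint32_t blocks = (bufflen + blk_sz - 1) / blk_sz;   /* 64 */
	uint32_t tot_dsds;

	/* DIN_INSERT / DOUT_STRIP: seg_cnt and prot_seg_cnt are both recomputed
	 * as one DSD per protection interval (no DIF bundling on this path). */
	tot_dsds = blocks + blocks;
	printf("insert/strip: tot_dsds = %u\n", tot_dsds);   /* 128 */

	/* All other protected ops: data DSDs plus protection DSDs as mapped. */
	tot_dsds = data_segs + prot_segs;
	printf("other ops:    tot_dsds = %u\n", tot_dsds);   /* 10 */
	return 0;
}
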
@@ -1377,6 +1415,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha, | |||
1377 | BUG_ON(!cmd->sg_mapped); | 1415 | BUG_ON(!cmd->sg_mapped); |
1378 | pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); | 1416 | pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); |
1379 | cmd->sg_mapped = 0; | 1417 | cmd->sg_mapped = 0; |
1418 | |||
1419 | if (cmd->prot_sg_cnt) | ||
1420 | pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, | ||
1421 | cmd->dma_data_direction); | ||
1422 | |||
1423 | if (cmd->ctx_dsd_alloced) | ||
1424 | qla2x00_clean_dsd_pool(ha, NULL, cmd); | ||
1425 | |||
1426 | if (cmd->ctx) | ||
1427 | dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); | ||
1380 | } | 1428 | } |
1381 | 1429 | ||
1382 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, | 1430 | static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, |
@@ -1665,8 +1713,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, | |||
1665 | return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; | 1713 | return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; |
1666 | } | 1714 | } |
1667 | 1715 | ||
1668 | ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", | 1716 | ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n", |
1669 | vha->vp_idx, cmd->tag); | 1717 | vha->vp_idx, cmd->tag, |
1718 | be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); | ||
1670 | 1719 | ||
1671 | prm->cmd = cmd; | 1720 | prm->cmd = cmd; |
1672 | prm->tgt = tgt; | 1721 | prm->tgt = tgt; |
@@ -1902,6 +1951,323 @@ skip_explict_conf: | |||
1902 | /* Sense with len > 24, is it possible ??? */ | 1951 | /* Sense with len > 24, is it possible ??? */ |
1903 | } | 1952 | } |
1904 | 1953 | ||
1954 | |||
1955 | |||
1956 | /* diff */ | ||
1957 | static inline int | ||
1958 | qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) | ||
1959 | { | ||
1960 | /* | ||
1961 | * Uncomment when corresponding SCSI changes are done. | ||
1962 | * | ||
1963 | if (!sp->cmd->prot_chk) | ||
1964 | return 0; | ||
1965 | * | ||
1966 | */ | ||
1967 | switch (se_cmd->prot_op) { | ||
1968 | case TARGET_PROT_DOUT_INSERT: | ||
1969 | case TARGET_PROT_DIN_STRIP: | ||
1970 | if (ql2xenablehba_err_chk >= 1) | ||
1971 | return 1; | ||
1972 | break; | ||
1973 | case TARGET_PROT_DOUT_PASS: | ||
1974 | case TARGET_PROT_DIN_PASS: | ||
1975 | if (ql2xenablehba_err_chk >= 2) | ||
1976 | return 1; | ||
1977 | break; | ||
1978 | case TARGET_PROT_DIN_INSERT: | ||
1979 | case TARGET_PROT_DOUT_STRIP: | ||
1980 | return 1; | ||
1981 | default: | ||
1982 | break; | ||
1983 | } | ||
1984 | return 0; | ||
1985 | } | ||
1986 | |||
1987 | /* | ||
1988 | * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command | ||
1989 | * | ||
1990 | */ | ||
1991 | static inline void | ||
1992 | qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) | ||
1993 | { | ||
1994 | uint32_t lba = 0xffffffff & se_cmd->t_task_lba; | ||
1995 | |||
1996 | /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 | ||
1997 | * have been implemented by TCM, before AppTag is avail. | ||
1998 | * Look for modesense_handlers[] | ||
1999 | */ | ||
2000 | ctx->app_tag = __constant_cpu_to_le16(0); | ||
2001 | ctx->app_tag_mask[0] = 0x0; | ||
2002 | ctx->app_tag_mask[1] = 0x0; | ||
2003 | |||
2004 | switch (se_cmd->prot_type) { | ||
2005 | case TARGET_DIF_TYPE0_PROT: | ||
2006 | /* | ||
2007 | * No check for ql2xenablehba_err_chk, as it would be an | ||
2008 | * I/O error if hba tag generation is not done. | ||
2009 | */ | ||
2010 | ctx->ref_tag = cpu_to_le32(lba); | ||
2011 | |||
2012 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
2013 | break; | ||
2014 | |||
2015 | /* enable ALL bytes of the ref tag */ | ||
2016 | ctx->ref_tag_mask[0] = 0xff; | ||
2017 | ctx->ref_tag_mask[1] = 0xff; | ||
2018 | ctx->ref_tag_mask[2] = 0xff; | ||
2019 | ctx->ref_tag_mask[3] = 0xff; | ||
2020 | break; | ||
2021 | /* | ||
2022 | * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and | ||
2023 | * 16 bit app tag. | ||
2024 | */ | ||
2025 | case TARGET_DIF_TYPE1_PROT: | ||
2026 | ctx->ref_tag = cpu_to_le32(lba); | ||
2027 | |||
2028 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
2029 | break; | ||
2030 | |||
2031 | /* enable ALL bytes of the ref tag */ | ||
2032 | ctx->ref_tag_mask[0] = 0xff; | ||
2033 | ctx->ref_tag_mask[1] = 0xff; | ||
2034 | ctx->ref_tag_mask[2] = 0xff; | ||
2035 | ctx->ref_tag_mask[3] = 0xff; | ||
2036 | break; | ||
2037 | /* | ||
2038 | * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to | ||
2039 | * match LBA in CDB + N | ||
2040 | */ | ||
2041 | case TARGET_DIF_TYPE2_PROT: | ||
2042 | ctx->ref_tag = cpu_to_le32(lba); | ||
2043 | |||
2044 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
2045 | break; | ||
2046 | |||
2047 | /* enable ALL bytes of the ref tag */ | ||
2048 | ctx->ref_tag_mask[0] = 0xff; | ||
2049 | ctx->ref_tag_mask[1] = 0xff; | ||
2050 | ctx->ref_tag_mask[2] = 0xff; | ||
2051 | ctx->ref_tag_mask[3] = 0xff; | ||
2052 | break; | ||
2053 | |||
2054 | /* For Type 3 protection: 16 bit GUARD only */ | ||
2055 | case TARGET_DIF_TYPE3_PROT: | ||
2056 | ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = | ||
2057 | ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; | ||
2058 | break; | ||
2059 | } | ||
2060 | } | ||
2061 | |||
2062 | |||
2063 | static inline int | ||
2064 | qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | ||
2065 | { | ||
2066 | uint32_t *cur_dsd; | ||
2067 | int sgc; | ||
2068 | uint32_t transfer_length = 0; | ||
2069 | uint32_t data_bytes; | ||
2070 | uint32_t dif_bytes; | ||
2071 | uint8_t bundling = 1; | ||
2072 | uint8_t *clr_ptr; | ||
2073 | struct crc_context *crc_ctx_pkt = NULL; | ||
2074 | struct qla_hw_data *ha; | ||
2075 | struct ctio_crc2_to_fw *pkt; | ||
2076 | dma_addr_t crc_ctx_dma; | ||
2077 | uint16_t fw_prot_opts = 0; | ||
2078 | struct qla_tgt_cmd *cmd = prm->cmd; | ||
2079 | struct se_cmd *se_cmd = &cmd->se_cmd; | ||
2080 | uint32_t h; | ||
2081 | struct atio_from_isp *atio = &prm->cmd->atio; | ||
2082 | |||
2083 | sgc = 0; | ||
2084 | ha = vha->hw; | ||
2085 | |||
2086 | pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; | ||
2087 | prm->pkt = pkt; | ||
2088 | memset(pkt, 0, sizeof(*pkt)); | ||
2089 | |||
2090 | ql_dbg(ql_dbg_tgt, vha, 0xe071, | ||
2091 | "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", | ||
2092 | vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, | ||
2093 | prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); | ||
2094 | |||
2095 | if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || | ||
2096 | (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) | ||
2097 | bundling = 0; | ||
2098 | |||
2099 | /* Compute dif len and adjust data len to include protection */ | ||
2100 | data_bytes = cmd->bufflen; | ||
2101 | dif_bytes = (data_bytes / cmd->blk_sz) * 8; | ||
2102 | |||
2103 | switch (se_cmd->prot_op) { | ||
2104 | case TARGET_PROT_DIN_INSERT: | ||
2105 | case TARGET_PROT_DOUT_STRIP: | ||
2106 | transfer_length = data_bytes; | ||
2107 | data_bytes += dif_bytes; | ||
2108 | break; | ||
2109 | |||
2110 | case TARGET_PROT_DIN_STRIP: | ||
2111 | case TARGET_PROT_DOUT_INSERT: | ||
2112 | case TARGET_PROT_DIN_PASS: | ||
2113 | case TARGET_PROT_DOUT_PASS: | ||
2114 | transfer_length = data_bytes + dif_bytes; | ||
2115 | break; | ||
2116 | |||
2117 | default: | ||
2118 | BUG(); | ||
2119 | break; | ||
2120 | } | ||
2121 | |||
2122 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
2123 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | ||
2124 | /* HBA error checking enabled */ | ||
2125 | else if (IS_PI_UNINIT_CAPABLE(ha)) { | ||
2126 | if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || | ||
2127 | (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) | ||
2128 | fw_prot_opts |= PO_DIS_VALD_APP_ESC; | ||
2129 | else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) | ||
2130 | fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; | ||
2131 | } | ||
2132 | |||
2133 | switch (se_cmd->prot_op) { | ||
2134 | case TARGET_PROT_DIN_INSERT: | ||
2135 | case TARGET_PROT_DOUT_INSERT: | ||
2136 | fw_prot_opts |= PO_MODE_DIF_INSERT; | ||
2137 | break; | ||
2138 | case TARGET_PROT_DIN_STRIP: | ||
2139 | case TARGET_PROT_DOUT_STRIP: | ||
2140 | fw_prot_opts |= PO_MODE_DIF_REMOVE; | ||
2141 | break; | ||
2142 | case TARGET_PROT_DIN_PASS: | ||
2143 | case TARGET_PROT_DOUT_PASS: | ||
2144 | fw_prot_opts |= PO_MODE_DIF_PASS; | ||
2145 | /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ | ||
2146 | break; | ||
2147 | default:/* Normal Request */ | ||
2148 | fw_prot_opts |= PO_MODE_DIF_PASS; | ||
2149 | break; | ||
2150 | } | ||
2151 | |||
2152 | |||
2153 | /* ---- PKT ---- */ | ||
2154 | /* Update entry type to indicate Command Type CRC_2 IOCB */ | ||
2155 | pkt->entry_type = CTIO_CRC2; | ||
2156 | pkt->entry_count = 1; | ||
2157 | pkt->vp_index = vha->vp_idx; | ||
2158 | |||
2159 | h = qlt_make_handle(vha); | ||
2160 | if (unlikely(h == QLA_TGT_NULL_HANDLE)) { | ||
2161 | /* | ||
2162 | * CTIO type 7 from the firmware doesn't provide a way to | ||
2163 | * know the initiator's LOOP ID, hence we can't find | ||
2164 | * the session and, so, the command. | ||
2165 | */ | ||
2166 | return -EAGAIN; | ||
2167 | } else | ||
2168 | ha->tgt.cmds[h-1] = prm->cmd; | ||
2169 | |||
2170 | |||
2171 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; | ||
2172 | pkt->nport_handle = prm->cmd->loop_id; | ||
2173 | pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); | ||
2174 | pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
2175 | pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
2176 | pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
2177 | pkt->exchange_addr = atio->u.isp24.exchange_addr; | ||
2178 | pkt->ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); | ||
2179 | pkt->flags |= (atio->u.isp24.attr << 9); | ||
2180 | pkt->relative_offset = cpu_to_le32(prm->cmd->offset); | ||
2181 | |||
2182 | /* Set transfer direction */ | ||
2183 | if (cmd->dma_data_direction == DMA_TO_DEVICE) | ||
2184 | pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); | ||
2185 | else if (cmd->dma_data_direction == DMA_FROM_DEVICE) | ||
2186 | pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); | ||
2187 | |||
2188 | |||
2189 | pkt->dseg_count = prm->tot_dsds; | ||
2190 | /* Fibre channel byte count */ | ||
2191 | pkt->transfer_length = cpu_to_le32(transfer_length); | ||
2192 | |||
2193 | |||
2194 | /* ----- CRC context -------- */ | ||
2195 | |||
2196 | /* Allocate CRC context from global pool */ | ||
2197 | crc_ctx_pkt = cmd->ctx = | ||
2198 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); | ||
2199 | |||
2200 | if (!crc_ctx_pkt) | ||
2201 | goto crc_queuing_error; | ||
2202 | |||
2203 | /* Zero out CTX area. */ | ||
2204 | clr_ptr = (uint8_t *)crc_ctx_pkt; | ||
2205 | memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); | ||
2206 | |||
2207 | crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; | ||
2208 | INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); | ||
2209 | |||
2210 | /* Set handle */ | ||
2211 | crc_ctx_pkt->handle = pkt->handle; | ||
2212 | |||
2213 | qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); | ||
2214 | |||
2215 | pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); | ||
2216 | pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); | ||
2217 | pkt->crc_context_len = CRC_CONTEXT_LEN_FW; | ||
2218 | |||
2219 | |||
2220 | if (!bundling) { | ||
2221 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; | ||
2222 | } else { | ||
2223 | /* | ||
2224 | * Configure bundling if we need to fetch interleaving | ||
2225 | * protection PCI accesses | ||
2226 | */ | ||
2227 | fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; | ||
2228 | crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); | ||
2229 | crc_ctx_pkt->u.bundling.dseg_count = | ||
2230 | cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); | ||
2231 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; | ||
2232 | } | ||
2233 | |||
2234 | /* Finish the common fields of CRC pkt */ | ||
2235 | crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); | ||
2236 | crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); | ||
2237 | crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); | ||
2238 | crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); | ||
2239 | |||
2240 | |||
2241 | /* Walks data segments */ | ||
2242 | pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR); | ||
2243 | |||
2244 | if (!bundling && prm->prot_seg_cnt) { | ||
2245 | if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, | ||
2246 | prm->tot_dsds, cmd)) | ||
2247 | goto crc_queuing_error; | ||
2248 | } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, | ||
2249 | (prm->tot_dsds - prm->prot_seg_cnt), cmd)) | ||
2250 | goto crc_queuing_error; | ||
2251 | |||
2252 | if (bundling && prm->prot_seg_cnt) { | ||
2253 | /* Walks dif segments */ | ||
2254 | pkt->add_flags |= | ||
2255 | __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA); | ||
2256 | |||
2257 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | ||
2258 | if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, | ||
2259 | prm->prot_seg_cnt, cmd)) | ||
2260 | goto crc_queuing_error; | ||
2261 | } | ||
2262 | return QLA_SUCCESS; | ||
2263 | |||
2264 | crc_queuing_error: | ||
2265 | /* Cleanup will be performed by the caller */ | ||
2266 | |||
2267 | return QLA_FUNCTION_FAILED; | ||
2268 | } | ||
2269 | |||
2270 | |||
1905 | /* | 2271 | /* |
1906 | * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * | 2272 | * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * |
1907 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon | 2273 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon |
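
The byte counts written into the CRC_2 IOCB and CRC context follow directly from the block math in qlt_build_ctio_crc2_pkt(); a standalone illustration with the same 32 KiB / 512-byte figures, mirroring the switch on prot_op above and nothing more:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bufflen = 32768, blk_sz = 512;
	uint32_t data_bytes = bufflen;
	uint32_t dif_bytes = (data_bytes / blk_sz) * 8;   /* 64 blocks * 8 = 512 */
	uint32_t transfer_length;

	/* TARGET_PROT_DIN_INSERT / TARGET_PROT_DOUT_STRIP */
	transfer_length = data_bytes;          /* FC transfer_length: 32768 */
	data_bytes += dif_bytes;               /* CRC context byte_count: 33280 */
	printf("insert/strip: transfer_length=%u byte_count=%u\n",
	       transfer_length, data_bytes);

	/* TARGET_PROT_DIN_STRIP / DOUT_INSERT / DIN_PASS / DOUT_PASS */
	data_bytes = bufflen;
	transfer_length = data_bytes + dif_bytes;   /* 33280 */
	printf("other ops:    transfer_length=%u byte_count=%u\n",
	       transfer_length, data_bytes);
	return 0;
}
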
@@ -1921,9 +2287,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
1921 | qlt_check_srr_debug(cmd, &xmit_type); | 2287 | qlt_check_srr_debug(cmd, &xmit_type); |
1922 | 2288 | ||
1923 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, | 2289 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, |
1924 | "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " | 2290 | "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", |
1925 | "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? | 2291 | (xmit_type & QLA_TGT_XMIT_STATUS) ? |
1926 | 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); | 2292 | 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, |
2293 | &cmd->se_cmd); | ||
1927 | 2294 | ||
1928 | res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, | 2295 | res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, |
1929 | &full_req_cnt); | 2296 | &full_req_cnt); |
@@ -1941,7 +2308,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
1941 | if (unlikely(res)) | 2308 | if (unlikely(res)) |
1942 | goto out_unmap_unlock; | 2309 | goto out_unmap_unlock; |
1943 | 2310 | ||
1944 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | 2311 | if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) |
2312 | res = qlt_build_ctio_crc2_pkt(&prm, vha); | ||
2313 | else | ||
2314 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
1945 | if (unlikely(res != 0)) | 2315 | if (unlikely(res != 0)) |
1946 | goto out_unmap_unlock; | 2316 | goto out_unmap_unlock; |
1947 | 2317 | ||
@@ -1953,7 +2323,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
1953 | __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | | 2323 | __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | |
1954 | CTIO7_FLAGS_STATUS_MODE_0); | 2324 | CTIO7_FLAGS_STATUS_MODE_0); |
1955 | 2325 | ||
1956 | qlt_load_data_segments(&prm, vha); | 2326 | if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) |
2327 | qlt_load_data_segments(&prm, vha); | ||
1957 | 2328 | ||
1958 | if (prm.add_status_pkt == 0) { | 2329 | if (prm.add_status_pkt == 0) { |
1959 | if (xmit_type & QLA_TGT_XMIT_STATUS) { | 2330 | if (xmit_type & QLA_TGT_XMIT_STATUS) { |
@@ -1983,8 +2354,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
1983 | ql_dbg(ql_dbg_tgt, vha, 0xe019, | 2354 | ql_dbg(ql_dbg_tgt, vha, 0xe019, |
1984 | "Building additional status packet\n"); | 2355 | "Building additional status packet\n"); |
1985 | 2356 | ||
2357 | /* | ||
2358 | * T10Dif: ctio_crc2_to_fw overlay ontop of | ||
2359 | * ctio7_to_24xx | ||
2360 | */ | ||
1986 | memcpy(ctio, pkt, sizeof(*ctio)); | 2361 | memcpy(ctio, pkt, sizeof(*ctio)); |
2362 | /* reset back to CTIO7 */ | ||
1987 | ctio->entry_count = 1; | 2363 | ctio->entry_count = 1; |
2364 | ctio->entry_type = CTIO_TYPE7; | ||
1988 | ctio->dseg_count = 0; | 2365 | ctio->dseg_count = 0; |
1989 | ctio->u.status1.flags &= ~__constant_cpu_to_le16( | 2366 | ctio->u.status1.flags &= ~__constant_cpu_to_le16( |
1990 | CTIO7_FLAGS_DATA_IN); | 2367 | CTIO7_FLAGS_DATA_IN); |
@@ -1993,6 +2370,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
1993 | pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; | 2370 | pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; |
1994 | pkt->u.status0.flags |= __constant_cpu_to_le16( | 2371 | pkt->u.status0.flags |= __constant_cpu_to_le16( |
1995 | CTIO7_FLAGS_DONT_RET_CTIO); | 2372 | CTIO7_FLAGS_DONT_RET_CTIO); |
2373 | |||
2374 | /* qlt_24xx_init_ctio_to_isp will correct | ||
2375 | * all neccessary fields that's part of CTIO7. | ||
2376 | * There should be no residual of CTIO-CRC2 data. | ||
2377 | */ | ||
1996 | qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, | 2378 | qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, |
1997 | &prm); | 2379 | &prm); |
1998 | pr_debug("Status CTIO7: %p\n", ctio); | 2380 | pr_debug("Status CTIO7: %p\n", ctio); |
@@ -2041,8 +2423,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) | |||
2041 | if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) | 2423 | if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) |
2042 | return -EIO; | 2424 | return -EIO; |
2043 | 2425 | ||
2044 | ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", | 2426 | ql_dbg(ql_dbg_tgt, vha, 0xe01b, |
2045 | (int)vha->vp_idx); | 2427 | "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", |
2428 | __func__, (int)vha->vp_idx, &cmd->se_cmd, | ||
2429 | be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); | ||
2046 | 2430 | ||
2047 | /* Calculate number of entries and segments required */ | 2431 | /* Calculate number of entries and segments required */ |
2048 | if (qlt_pci_map_calc_cnt(&prm) != 0) | 2432 | if (qlt_pci_map_calc_cnt(&prm) != 0) |
@@ -2054,14 +2438,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) | |||
2054 | res = qlt_check_reserve_free_req(vha, prm.req_cnt); | 2438 | res = qlt_check_reserve_free_req(vha, prm.req_cnt); |
2055 | if (res != 0) | 2439 | if (res != 0) |
2056 | goto out_unlock_free_unmap; | 2440 | goto out_unlock_free_unmap; |
2441 | if (cmd->se_cmd.prot_op) | ||
2442 | res = qlt_build_ctio_crc2_pkt(&prm, vha); | ||
2443 | else | ||
2444 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
2057 | 2445 | ||
2058 | res = qlt_24xx_build_ctio_pkt(&prm, vha); | ||
2059 | if (unlikely(res != 0)) | 2446 | if (unlikely(res != 0)) |
2060 | goto out_unlock_free_unmap; | 2447 | goto out_unlock_free_unmap; |
2061 | pkt = (struct ctio7_to_24xx *)prm.pkt; | 2448 | pkt = (struct ctio7_to_24xx *)prm.pkt; |
2062 | pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | | 2449 | pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | |
2063 | CTIO7_FLAGS_STATUS_MODE_0); | 2450 | CTIO7_FLAGS_STATUS_MODE_0); |
2064 | qlt_load_data_segments(&prm, vha); | 2451 | |
2452 | if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) | ||
2453 | qlt_load_data_segments(&prm, vha); | ||
2065 | 2454 | ||
2066 | cmd->state = QLA_TGT_STATE_NEED_DATA; | 2455 | cmd->state = QLA_TGT_STATE_NEED_DATA; |
2067 | 2456 | ||
@@ -2079,6 +2468,143 @@ out_unlock_free_unmap: | |||
2079 | } | 2468 | } |
2080 | EXPORT_SYMBOL(qlt_rdy_to_xfer); | 2469 | EXPORT_SYMBOL(qlt_rdy_to_xfer); |
2081 | 2470 | ||
2471 | |||
2472 | /* | ||
2473 | * Checks the guard or meta-data for the type of error | ||
2474 | * detected by the HBA. | ||
2475 | */ | ||
2476 | static inline int | ||
2477 | qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, | ||
2478 | struct ctio_crc_from_fw *sts) | ||
2479 | { | ||
2480 | uint8_t *ap = &sts->actual_dif[0]; | ||
2481 | uint8_t *ep = &sts->expected_dif[0]; | ||
2482 | uint32_t e_ref_tag, a_ref_tag; | ||
2483 | uint16_t e_app_tag, a_app_tag; | ||
2484 | uint16_t e_guard, a_guard; | ||
2485 | uint64_t lba = cmd->se_cmd.t_task_lba; | ||
2486 | |||
2487 | a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); | ||
2488 | a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); | ||
2489 | a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); | ||
2490 | |||
2491 | e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); | ||
2492 | e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); | ||
2493 | e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); | ||
2494 | |||
2495 | ql_dbg(ql_dbg_tgt, vha, 0xe075, | ||
2496 | "iocb(s) %p Returned STATUS.\n", sts); | ||
2497 | |||
2498 | ql_dbg(ql_dbg_tgt, vha, 0xf075, | ||
2499 | "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", | ||
2500 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
2501 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); | ||
2502 | |||
2503 | /* | ||
2504 | * Ignore sector if: | ||
2505 | * For type 3: ref & app tag is all 'f's | ||
2506 | * For type 0,1,2: app tag is all 'f's | ||
2507 | */ | ||
2508 | if ((a_app_tag == 0xffff) && | ||
2509 | ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || | ||
2510 | (a_ref_tag == 0xffffffff))) { | ||
2511 | uint32_t blocks_done; | ||
2512 | |||
2513 | /* 2TB boundary case covered automatically with this */ | ||
2514 | blocks_done = e_ref_tag - (uint32_t)lba + 1; | ||
2515 | cmd->se_cmd.bad_sector = e_ref_tag; | ||
2516 | cmd->se_cmd.pi_err = 0; | ||
2517 | ql_dbg(ql_dbg_tgt, vha, 0xf074, | ||
2518 | "need to return scsi good\n"); | ||
2519 | |||
2520 | /* Update protection tag */ | ||
2521 | if (cmd->prot_sg_cnt) { | ||
2522 | uint32_t i, j = 0, k = 0, num_ent; | ||
2523 | struct scatterlist *sg, *sgl; | ||
2524 | |||
2525 | |||
2526 | sgl = cmd->prot_sg; | ||
2527 | |||
2528 | /* Patch the corresponding protection tags */ | ||
2529 | for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { | ||
2530 | num_ent = sg_dma_len(sg) / 8; | ||
2531 | if (k + num_ent < blocks_done) { | ||
2532 | k += num_ent; | ||
2533 | continue; | ||
2534 | } | ||
2535 | j = blocks_done - k - 1; | ||
2536 | k = blocks_done; | ||
2537 | break; | ||
2538 | } | ||
2539 | |||
2540 | if (k != blocks_done) { | ||
2541 | ql_log(ql_log_warn, vha, 0xf076, | ||
2542 | "unexpected tag values tag:lba=%u:%llu)\n", | ||
2543 | e_ref_tag, (unsigned long long)lba); | ||
2544 | goto out; | ||
2545 | } | ||
2546 | |||
2547 | #if 0 | ||
2548 | struct sd_dif_tuple *spt; | ||
2549 | /* TODO: | ||
2550 | * This section came from initiator. Is it valid here? | ||
2551 | * should ulp be override with actual val??? | ||
2552 | */ | ||
2553 | spt = page_address(sg_page(sg)) + sg->offset; | ||
2554 | spt += j; | ||
2555 | |||
2556 | spt->app_tag = 0xffff; | ||
2557 | if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) | ||
2558 | spt->ref_tag = 0xffffffff; | ||
2559 | #endif | ||
2560 | } | ||
2561 | |||
2562 | return 0; | ||
2563 | } | ||
2564 | |||
2565 | /* check guard */ | ||
2566 | if (e_guard != a_guard) { | ||
2567 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; | ||
2568 | cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; | ||
2569 | |||
2570 | ql_log(ql_log_warn, vha, 0xe076, | ||
2571 | "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | ||
2572 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
2573 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | ||
2574 | a_guard, e_guard, cmd); | ||
2575 | goto out; | ||
2576 | } | ||
2577 | |||
2578 | /* check ref tag */ | ||
2579 | if (e_ref_tag != a_ref_tag) { | ||
2580 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; | ||
2581 | cmd->se_cmd.bad_sector = e_ref_tag; | ||
2582 | |||
2583 | ql_log(ql_log_warn, vha, 0xe077, | ||
2584 | "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | ||
2585 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
2586 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | ||
2587 | a_guard, e_guard, cmd); | ||
2588 | goto out; | ||
2589 | } | ||
2590 | |||
2591 | /* check appl tag */ | ||
2592 | if (e_app_tag != a_app_tag) { | ||
2593 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; | ||
2594 | cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; | ||
2595 | |||
2596 | ql_log(ql_log_warn, vha, 0xe078, | ||
2597 | "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | ||
2598 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
2599 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | ||
2600 | a_guard, e_guard, cmd); | ||
2601 | goto out; | ||
2602 | } | ||
2603 | out: | ||
2604 | return 1; | ||
2605 | } | ||
2606 | |||
2607 | |||
2082 | /* If hardware_lock held on entry, might drop it, then reaquire */ | 2608 | /* If hardware_lock held on entry, might drop it, then reaquire */ |
2083 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ | 2609 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ |
2084 | static int __qlt_send_term_exchange(struct scsi_qla_host *vha, | 2610 | static int __qlt_send_term_exchange(struct scsi_qla_host *vha, |
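
When the reported application tag is 0xffff (and, for type 3 protection, the ref tag is all ones as well), qlt_handle_dif_error() treats the sector as one to ignore and locates the matching 8-byte tuple in the protection scatterlist. A standalone rerun of that arithmetic with made-up numbers, modelling the scatterlist as an array of per-entry PI byte lengths:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lba = 0x1000;           /* command start LBA */
	uint32_t e_ref_tag = 0x100a;     /* expected ref tag reported by the HBA */
	uint32_t blocks_done = e_ref_tag - (uint32_t)lba + 1;   /* 11 */

	uint32_t sg_dma_len[3] = { 64, 64, 64 };   /* 8 tuples per sg entry */
	uint32_t i, j = 0, k = 0, num_ent;

	for (i = 0; i < 3; i++) {
		num_ent = sg_dma_len[i] / 8;
		if (k + num_ent < blocks_done) {
			k += num_ent;
			continue;
		}
		j = blocks_done - k - 1;   /* tuple index within entry i */
		k = blocks_done;
		break;
	}

	/* prints: blocks_done=11 entry=1 tuple=2 */
	printf("blocks_done=%u entry=%u tuple=%u\n", blocks_done, i, j);
	return 0;
}
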
@@ -2159,14 +2685,20 @@ done: | |||
2159 | if (!ha_locked && !in_interrupt()) | 2685 | if (!ha_locked && !in_interrupt()) |
2160 | msleep(250); /* just in case */ | 2686 | msleep(250); /* just in case */ |
2161 | 2687 | ||
2688 | if (cmd->sg_mapped) | ||
2689 | qlt_unmap_sg(vha, cmd); | ||
2162 | vha->hw->tgt.tgt_ops->free_cmd(cmd); | 2690 | vha->hw->tgt.tgt_ops->free_cmd(cmd); |
2163 | } | 2691 | } |
2164 | } | 2692 | } |
2165 | 2693 | ||
2166 | void qlt_free_cmd(struct qla_tgt_cmd *cmd) | 2694 | void qlt_free_cmd(struct qla_tgt_cmd *cmd) |
2167 | { | 2695 | { |
2168 | BUG_ON(cmd->sg_mapped); | 2696 | ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, |
2697 | "%s: se_cmd[%p] ox_id %04x\n", | ||
2698 | __func__, &cmd->se_cmd, | ||
2699 | be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); | ||
2169 | 2700 | ||
2701 | BUG_ON(cmd->sg_mapped); | ||
2170 | if (unlikely(cmd->free_sg)) | 2702 | if (unlikely(cmd->free_sg)) |
2171 | kfree(cmd->sg); | 2703 | kfree(cmd->sg); |
2172 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); | 2704 | kmem_cache_free(qla_tgt_cmd_cachep, cmd); |
@@ -2404,10 +2936,40 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
2404 | else | 2936 | else |
2405 | return; | 2937 | return; |
2406 | 2938 | ||
2939 | case CTIO_DIF_ERROR: { | ||
2940 | struct ctio_crc_from_fw *crc = | ||
2941 | (struct ctio_crc_from_fw *)ctio; | ||
2942 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, | ||
2943 | "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", | ||
2944 | vha->vp_idx, status, cmd->state, se_cmd, | ||
2945 | *((u64 *)&crc->actual_dif[0]), | ||
2946 | *((u64 *)&crc->expected_dif[0])); | ||
2947 | |||
2948 | if (qlt_handle_dif_error(vha, cmd, ctio)) { | ||
2949 | if (cmd->state == QLA_TGT_STATE_NEED_DATA) { | ||
2950 | /* scsi Write/xfer rdy complete */ | ||
2951 | goto skip_term; | ||
2952 | } else { | ||
2953 | /* scsi read/xmit respond complete | ||
2954 | * call handle dif to send scsi status | ||
2955 | * rather than terminate exchange. | ||
2956 | */ | ||
2957 | cmd->state = QLA_TGT_STATE_PROCESSED; | ||
2958 | ha->tgt.tgt_ops->handle_dif_err(cmd); | ||
2959 | return; | ||
2960 | } | ||
2961 | } else { | ||
2962 | /* Need to generate a SCSI good completion. | ||
2963 | * because FW did not send scsi status. | ||
2964 | */ | ||
2965 | status = 0; | ||
2966 | goto skip_term; | ||
2967 | } | ||
2968 | break; | ||
2969 | } | ||
2407 | default: | 2970 | default: |
2408 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, | 2971 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, |
2409 | "qla_target(%d): CTIO with error status " | 2972 | "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", |
2410 | "0x%x received (state %x, se_cmd %p\n", | ||
2411 | vha->vp_idx, status, cmd->state, se_cmd); | 2973 | vha->vp_idx, status, cmd->state, se_cmd); |
2412 | break; | 2974 | break; |
2413 | } | 2975 | } |
@@ -2416,6 +2978,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
2416 | if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) | 2978 | if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) |
2417 | return; | 2979 | return; |
2418 | } | 2980 | } |
2981 | skip_term: | ||
2419 | 2982 | ||
2420 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { | 2983 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { |
2421 | ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); | 2984 | ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); |
@@ -2563,8 +3126,9 @@ static void qlt_do_work(struct work_struct *work) | |||
2563 | atio->u.isp24.fcp_cmnd.add_cdb_len])); | 3126 | atio->u.isp24.fcp_cmnd.add_cdb_len])); |
2564 | 3127 | ||
2565 | ql_dbg(ql_dbg_tgt, vha, 0xe022, | 3128 | ql_dbg(ql_dbg_tgt, vha, 0xe022, |
2566 | "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", | 3129 | "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n", |
2567 | cmd, cmd->unpacked_lun, cmd->tag); | 3130 | cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length, |
3131 | cmd->atio.u.isp24.fcp_hdr.ox_id); | ||
2568 | 3132 | ||
2569 | ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, | 3133 | ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, |
2570 | fcp_task_attr, data_dir, bidi); | 3134 | fcp_task_attr, data_dir, bidi); |
@@ -3527,11 +4091,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, | |||
3527 | switch (atio->u.raw.entry_type) { | 4091 | switch (atio->u.raw.entry_type) { |
3528 | case ATIO_TYPE7: | 4092 | case ATIO_TYPE7: |
3529 | ql_dbg(ql_dbg_tgt, vha, 0xe02d, | 4093 | ql_dbg(ql_dbg_tgt, vha, 0xe02d, |
3530 | "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " | 4094 | "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n", |
3531 | "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", | ||
3532 | vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, | 4095 | vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, |
3533 | atio->u.isp24.fcp_cmnd.rddata, | 4096 | atio->u.isp24.fcp_cmnd.rddata, |
3534 | atio->u.isp24.fcp_cmnd.wrdata, | 4097 | atio->u.isp24.fcp_cmnd.wrdata, |
4098 | atio->u.isp24.fcp_cmnd.cdb[0], | ||
3535 | atio->u.isp24.fcp_cmnd.add_cdb_len, | 4099 | atio->u.isp24.fcp_cmnd.add_cdb_len, |
3536 | be32_to_cpu(get_unaligned((uint32_t *) | 4100 | be32_to_cpu(get_unaligned((uint32_t *) |
3537 | &atio->u.isp24.fcp_cmnd.add_cdb[ | 4101 | &atio->u.isp24.fcp_cmnd.add_cdb[ |
@@ -3629,11 +4193,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) | |||
3629 | tgt->irq_cmd_count++; | 4193 | tgt->irq_cmd_count++; |
3630 | 4194 | ||
3631 | switch (pkt->entry_type) { | 4195 | switch (pkt->entry_type) { |
4196 | case CTIO_CRC2: | ||
3632 | case CTIO_TYPE7: | 4197 | case CTIO_TYPE7: |
3633 | { | 4198 | { |
3634 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; | 4199 | struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; |
3635 | ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", | 4200 | ql_dbg(ql_dbg_tgt, vha, 0xe030, |
3636 | vha->vp_idx); | 4201 | "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", |
4202 | entry->entry_type, vha->vp_idx); | ||
3637 | qlt_do_ctio_completion(vha, entry->handle, | 4203 | qlt_do_ctio_completion(vha, entry->handle, |
3638 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), | 4204 | le16_to_cpu(entry->status)|(pkt->entry_status << 16), |
3639 | entry); | 4205 | entry); |
@@ -4768,6 +5334,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha, | |||
4768 | case ABTS_RESP_24XX: | 5334 | case ABTS_RESP_24XX: |
4769 | case CTIO_TYPE7: | 5335 | case CTIO_TYPE7: |
4770 | case NOTIFY_ACK_TYPE: | 5336 | case NOTIFY_ACK_TYPE: |
5337 | case CTIO_CRC2: | ||
4771 | return 1; | 5338 | return 1; |
4772 | default: | 5339 | default: |
4773 | return 0; | 5340 | return 0; |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index ce33d8c26406..f873e10451d2 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -293,6 +293,7 @@ struct ctio_to_2xxx { | |||
293 | #define CTIO_ABORTED 0x02 | 293 | #define CTIO_ABORTED 0x02 |
294 | #define CTIO_INVALID_RX_ID 0x08 | 294 | #define CTIO_INVALID_RX_ID 0x08 |
295 | #define CTIO_TIMEOUT 0x0B | 295 | #define CTIO_TIMEOUT 0x0B |
296 | #define CTIO_DIF_ERROR 0x0C /* DIF error detected */ | ||
296 | #define CTIO_LIP_RESET 0x0E | 297 | #define CTIO_LIP_RESET 0x0E |
297 | #define CTIO_TARGET_RESET 0x17 | 298 | #define CTIO_TARGET_RESET 0x17 |
298 | #define CTIO_PORT_UNAVAILABLE 0x28 | 299 | #define CTIO_PORT_UNAVAILABLE 0x28 |
@@ -498,11 +499,12 @@ struct ctio7_from_24xx { | |||
498 | #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 | 499 | #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 |
499 | #define CTIO7_FLAGS_STATUS_MODE_0 0 | 500 | #define CTIO7_FLAGS_STATUS_MODE_0 0 |
500 | #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 | 501 | #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 |
502 | #define CTIO7_FLAGS_STATUS_MODE_2 BIT_7 | ||
501 | #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 | 503 | #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 |
502 | #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 | 504 | #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 |
503 | #define CTIO7_FLAGS_DSD_PTR BIT_2 | 505 | #define CTIO7_FLAGS_DSD_PTR BIT_2 |
504 | #define CTIO7_FLAGS_DATA_IN BIT_1 | 506 | #define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */ |
505 | #define CTIO7_FLAGS_DATA_OUT BIT_0 | 507 | #define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */ |
506 | 508 | ||
507 | #define ELS_PLOGI 0x3 | 509 | #define ELS_PLOGI 0x3 |
508 | #define ELS_FLOGI 0x4 | 510 | #define ELS_FLOGI 0x4 |
@@ -514,6 +516,68 @@ struct ctio7_from_24xx { | |||
514 | #define ELS_ADISC 0x52 | 516 | #define ELS_ADISC 0x52 |
515 | 517 | ||
516 | /* | 518 | /* |
519 | * CTIO Type CRC_2 IOCB | ||
520 | */ | ||
521 | struct ctio_crc2_to_fw { | ||
522 | uint8_t entry_type; /* Entry type. */ | ||
523 | #define CTIO_CRC2 0x7A | ||
524 | uint8_t entry_count; /* Entry count. */ | ||
525 | uint8_t sys_define; /* System defined. */ | ||
526 | uint8_t entry_status; /* Entry Status. */ | ||
527 | |||
528 | uint32_t handle; /* System handle. */ | ||
529 | uint16_t nport_handle; /* N_PORT handle. */ | ||
530 | uint16_t timeout; /* Command timeout. */ | ||
531 | |||
532 | uint16_t dseg_count; /* Data segment count. */ | ||
533 | uint8_t vp_index; | ||
534 | uint8_t add_flags; /* additional flags */ | ||
535 | #define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 | ||
536 | |||
537 | uint8_t initiator_id[3]; /* initiator ID */ | ||
538 | uint8_t reserved1; | ||
539 | uint32_t exchange_addr; /* rcv exchange address */ | ||
540 | uint16_t reserved2; | ||
541 | uint16_t flags; /* refer to CTIO7 flags values */ | ||
542 | uint32_t residual; | ||
543 | uint16_t ox_id; | ||
544 | uint16_t scsi_status; | ||
545 | uint32_t relative_offset; | ||
546 | uint32_t reserved5; | ||
547 | uint32_t transfer_length; /* total fc transfer length */ | ||
548 | uint32_t reserved6; | ||
549 | uint32_t crc_context_address[2]; /* Data segment address. */ | ||
550 | uint16_t crc_context_len; /* Data segment length. */ | ||
551 | uint16_t reserved_1; /* MUST be set to 0. */ | ||
552 | } __packed; | ||
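As a rough illustration of how a request-ring slot of this type might be filled in, the sketch below touches only fields declared in the structure above; the helper name, the crc_ctx_dma handle for a previously built struct crc_context, and the choice to carry exactly one CRC-context segment are assumptions, not code from this patch.

/* Hedged sketch: populate a CTIO Type CRC_2 IOCB.  'pkt' points at a
 * zeroed 64-byte slot on the request ring; 'crc_ctx_dma' is the DMA
 * address of a previously built struct crc_context (assumed helper data).
 */
static void example_build_ctio_crc2(struct ctio_crc2_to_fw *pkt,
				    struct qla_tgt_cmd *cmd,
				    dma_addr_t crc_ctx_dma,
				    uint32_t transfer_length)
{
	pkt->entry_type = CTIO_CRC2;
	pkt->entry_count = 1;
	pkt->vp_index = cmd->vha->vp_idx;
	pkt->add_flags = CTIO_CRC2_AF_DIF_DSD_ENA;	/* protection DSDs follow */
	pkt->dseg_count = cpu_to_le16(1);		/* one CRC-context segment */
	pkt->transfer_length = cpu_to_le32(transfer_length);
	pkt->relative_offset = cpu_to_le32(cmd->offset);
	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	pkt->crc_context_len = cpu_to_le16(sizeof(struct crc_context));
	pkt->reserved_1 = 0;				/* must be zero */
}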
553 | |||
554 | /* CTIO Type CRC_x Status IOCB */ | ||
555 | struct ctio_crc_from_fw { | ||
556 | uint8_t entry_type; /* Entry type. */ | ||
557 | uint8_t entry_count; /* Entry count. */ | ||
558 | uint8_t sys_define; /* System defined. */ | ||
559 | uint8_t entry_status; /* Entry Status. */ | ||
560 | |||
561 | uint32_t handle; /* System handle. */ | ||
562 | uint16_t status; | ||
563 | uint16_t timeout; /* Command timeout. */ | ||
564 | uint16_t dseg_count; /* Data segment count. */ | ||
565 | uint32_t reserved1; | ||
566 | uint16_t state_flags; | ||
567 | #define CTIO_CRC_SF_DIF_CHOPPED BIT_4 | ||
568 | |||
569 | uint32_t exchange_address; /* rcv exchange address */ | ||
570 | uint16_t reserved2; | ||
571 | uint16_t flags; | ||
572 | uint32_t resid_xfer_length; | ||
573 | uint16_t ox_id; | ||
574 | uint8_t reserved3[12]; | ||
575 | uint16_t runt_guard; /* reported runt blk guard */ | ||
576 | uint8_t actual_dif[8]; | ||
577 | uint8_t expected_dif[8]; | ||
578 | } __packed; | ||
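Both layouts are intended to fill exactly one 64-byte IOCB slot on the request and response rings; a compile-time check along the following lines (a sanity sketch, not part of the patch) would catch accidental padding or a miscounted reserved field.

/* Hedged sketch: both CRC_2 IOCBs must match the 64-byte queue entry size. */
static inline void example_ctio_crc2_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw)  != 64);
	BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64);
}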
579 | |||
580 | /* | ||
517 | * ISP queue - ABTS received/response entries structure definition for 24xx. | 581 | * ISP queue - ABTS received/response entries structure definition for 24xx. |
518 | */ | 582 | */ |
519 | #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ | 583 | #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ |
@@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl { | |||
641 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, | 705 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, |
642 | unsigned char *, uint32_t, int, int, int); | 706 | unsigned char *, uint32_t, int, int, int); |
643 | void (*handle_data)(struct qla_tgt_cmd *); | 707 | void (*handle_data)(struct qla_tgt_cmd *); |
708 | void (*handle_dif_err)(struct qla_tgt_cmd *); | ||
644 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, | 709 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, |
645 | uint32_t); | 710 | uint32_t); |
646 | void (*free_cmd)(struct qla_tgt_cmd *); | 711 | void (*free_cmd)(struct qla_tgt_cmd *); |
@@ -829,9 +894,9 @@ struct qla_tgt_sess { | |||
829 | }; | 894 | }; |
830 | 895 | ||
831 | struct qla_tgt_cmd { | 896 | struct qla_tgt_cmd { |
897 | struct se_cmd se_cmd; | ||
832 | struct qla_tgt_sess *sess; | 898 | struct qla_tgt_sess *sess; |
833 | int state; | 899 | int state; |
834 | struct se_cmd se_cmd; | ||
835 | struct work_struct free_work; | 900 | struct work_struct free_work; |
836 | struct work_struct work; | 901 | struct work_struct work; |
837 | /* Sense buffer that will be mapped into outgoing status */ | 902 | /* Sense buffer that will be mapped into outgoing status */ |
@@ -843,6 +908,7 @@ struct qla_tgt_cmd { | |||
843 | unsigned int free_sg:1; | 908 | unsigned int free_sg:1; |
844 | unsigned int aborted:1; /* Needed in case of SRR */ | 909 | unsigned int aborted:1; /* Needed in case of SRR */ |
845 | unsigned int write_data_transferred:1; | 910 | unsigned int write_data_transferred:1; |
911 | unsigned int ctx_dsd_alloced:1; | ||
846 | 912 | ||
847 | struct scatterlist *sg; /* cmd data buffer SG vector */ | 913 | struct scatterlist *sg; /* cmd data buffer SG vector */ |
848 | int sg_cnt; /* SG segments count */ | 914 | int sg_cnt; /* SG segments count */ |
@@ -857,6 +923,12 @@ struct qla_tgt_cmd { | |||
857 | struct scsi_qla_host *vha; | 923 | struct scsi_qla_host *vha; |
858 | 924 | ||
859 | struct atio_from_isp atio; | 925 | struct atio_from_isp atio; |
926 | /* t10dif */ | ||
927 | struct scatterlist *prot_sg; | ||
928 | uint32_t prot_sg_cnt; | ||
929 | uint32_t blk_sz; | ||
930 | struct crc_context *ctx; | ||
931 | |||
860 | }; | 932 | }; |
861 | 933 | ||
862 | struct qla_tgt_sess_work_param { | 934 | struct qla_tgt_sess_work_param { |
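The prot_sg/prot_sg_cnt/blk_sz/ctx fields just added to struct qla_tgt_cmd carry the T10-PI metadata scatterlist alongside the data scatterlist. A hedged sketch of how the DIF-aware transfer path might DMA-map that extra scatterlist is shown here; the function name and error handling are illustrative, and only the structure members come from the patch.

/* Hedged sketch: map the protection SGL next to the data SGL before the
 * CRC context is built.  Illustrative only; the real qlt_rdy_to_xfer_dif()
 * path is not reproduced here. */
static int example_map_prot_sg(struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha = cmd->vha->hw;

	if (!cmd->prot_sg_cnt)
		return 0;		/* no T10-PI metadata attached */

	cmd->prot_sg_cnt = pci_map_sg(ha->pdev, cmd->prot_sg,
				      cmd->prot_sg_cnt,
				      cmd->dma_data_direction);
	return cmd->prot_sg_cnt ? 0 : -ENOMEM;
}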
@@ -901,6 +973,10 @@ struct qla_tgt_prm { | |||
901 | int sense_buffer_len; | 973 | int sense_buffer_len; |
902 | int residual; | 974 | int residual; |
903 | int add_status_pkt; | 975 | int add_status_pkt; |
976 | /* dif */ | ||
977 | struct scatterlist *prot_sg; | ||
978 | uint16_t prot_seg_cnt; | ||
979 | uint16_t tot_dsds; | ||
904 | }; | 980 | }; |
905 | 981 | ||
906 | struct qla_tgt_srr_imm { | 982 | struct qla_tgt_srr_imm { |
@@ -976,6 +1052,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *, | |||
976 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); | 1052 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); |
977 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); | 1053 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); |
978 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); | 1054 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); |
1055 | extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *); | ||
1056 | extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t); | ||
979 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); | 1057 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); |
980 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); | 1058 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); |
981 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); | 1059 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); |
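A fabric module that has attached protection metadata to a command could, in principle, select the new DIF-aware entry points with a dispatch like the one below; whether tcm_qla2xxx actually switches this way is not shown in this hunk, so treat the helpers as hypothetical.

/* Hedged sketch: choose the DIF-aware transfer/response entry points when
 * protection metadata is attached.  Purely illustrative dispatch. */
static int example_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
	if (cmd->prot_sg_cnt)
		return qlt_rdy_to_xfer_dif(cmd);
	return qlt_rdy_to_xfer(cmd);
}

static int example_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
				 uint8_t scsi_status)
{
	if (cmd->prot_sg_cnt)
		return qlt_xmit_response_dif(cmd, xmit_type, scsi_status);
	return qlt_xmit_response(cmd, xmit_type, scsi_status);
}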
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68fb66fdb757..896cb23adb77 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) | |||
472 | cmd->sg_cnt = se_cmd->t_data_nents; | 472 | cmd->sg_cnt = se_cmd->t_data_nents; |
473 | cmd->sg = se_cmd->t_data_sg; | 473 | cmd->sg = se_cmd->t_data_sg; |
474 | 474 | ||
475 | cmd->prot_sg_cnt = se_cmd->t_prot_nents; | ||
476 | cmd->prot_sg = se_cmd->t_prot_sg; | ||
477 | cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; | ||
478 | se_cmd->pi_err = 0; | ||
479 | |||
475 | /* | 480 | /* |
476 | * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup | 481 | * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup |
477 | * the SGL mappings into PCIe memory for incoming FCP WRITE data. | 482 | * the SGL mappings into PCIe memory for incoming FCP WRITE data. |
@@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | |||
567 | return; | 572 | return; |
568 | } | 573 | } |
569 | 574 | ||
570 | transport_generic_request_failure(&cmd->se_cmd, | 575 | if (cmd->se_cmd.pi_err) |
571 | TCM_CHECK_CONDITION_ABORT_CMD); | 576 | transport_generic_request_failure(&cmd->se_cmd, |
577 | cmd->se_cmd.pi_err); | ||
578 | else | ||
579 | transport_generic_request_failure(&cmd->se_cmd, | ||
580 | TCM_CHECK_CONDITION_ABORT_CMD); | ||
581 | |||
572 | return; | 582 | return; |
573 | } | 583 | } |
574 | 584 | ||
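For the pi_err branch above to fire, the LLD must have recorded a protection-specific sense reason in se_cmd.pi_err before invoking the handle_data() callback. A hedged sketch of that producer side follows; the TCM sense codes exist in the target core, but the mismatch-classification policy shown is an assumption, not the patch's actual logic.

/* Hedged sketch: classify a firmware-reported DIF failure into a TCM
 * protection error code before handing the command back to the fabric
 * module.  The ctio_crc_from_fw fields exist in this patch; the mapping
 * policy is illustrative. */
static void example_record_pi_err(struct qla_tgt_cmd *cmd,
				  struct ctio_crc_from_fw *crc)
{
	if (memcmp(crc->actual_dif, crc->expected_dif, 8) == 0)
		return;					/* no mismatch recorded */

	/* Real code would compare guard/app/ref sub-fields individually. */
	cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
}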
@@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | |||
584 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | 594 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); |
585 | } | 595 | } |
586 | 596 | ||
597 | static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) | ||
598 | { | ||
599 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | ||
600 | |||
601 | /* Take an extra kref to prevent the cmd from being freed too early. | ||
602 | * We need to wait for the SCSI status/check-condition response | ||
603 | * generated by transport_generic_request_failure() to finish. | ||
604 | */ | ||
605 | kref_get(&cmd->se_cmd.cmd_kref); | ||
606 | transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * Called from qla_target.c:qlt_do_ctio_completion() | ||
611 | */ | ||
612 | static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) | ||
613 | { | ||
614 | INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); | ||
615 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | ||
616 | } | ||
617 | |||
587 | /* | 618 | /* |
588 | * Called from qla_target.c:qlt_issue_task_mgmt() | 619 | * Called from qla_target.c:qlt_issue_task_mgmt() |
589 | */ | 620 | */ |
@@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) | |||
610 | cmd->sg = se_cmd->t_data_sg; | 641 | cmd->sg = se_cmd->t_data_sg; |
611 | cmd->offset = 0; | 642 | cmd->offset = 0; |
612 | 643 | ||
644 | cmd->prot_sg_cnt = se_cmd->t_prot_nents; | ||
645 | cmd->prot_sg = se_cmd->t_prot_sg; | ||
646 | cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; | ||
647 | se_cmd->pi_err = 0; | ||
648 | |||
613 | /* | 649 | /* |
614 | * Now queue completed DATA_IN the qla2xxx LLD and response ring | 650 | * Now queue completed DATA_IN the qla2xxx LLD and response ring |
615 | */ | 651 | */ |
@@ -1600,6 +1636,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, | |||
1600 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { | 1636 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { |
1601 | .handle_cmd = tcm_qla2xxx_handle_cmd, | 1637 | .handle_cmd = tcm_qla2xxx_handle_cmd, |
1602 | .handle_data = tcm_qla2xxx_handle_data, | 1638 | .handle_data = tcm_qla2xxx_handle_data, |
1639 | .handle_dif_err = tcm_qla2xxx_handle_dif_err, | ||
1603 | .handle_tmr = tcm_qla2xxx_handle_tmr, | 1640 | .handle_tmr = tcm_qla2xxx_handle_tmr, |
1604 | .free_cmd = tcm_qla2xxx_free_cmd, | 1641 | .free_cmd = tcm_qla2xxx_free_cmd, |
1605 | .free_mcmd = tcm_qla2xxx_free_mcmd, | 1642 | .free_mcmd = tcm_qla2xxx_free_mcmd, |