-rw-r--r--   drivers/scsi/qla2xxx/qla_attr.c   |  16
-rw-r--r--   drivers/scsi/qla2xxx/qla_dbg.c    |  58
-rw-r--r--   drivers/scsi/qla2xxx/qla_dbg.h    |  10
-rw-r--r--   drivers/scsi/qla2xxx/qla_def.h    |  84
-rw-r--r--   drivers/scsi/qla2xxx/qla_fw.h     |  47
-rw-r--r--   drivers/scsi/qla2xxx/qla_gbl.h    |   4
-rw-r--r--   drivers/scsi/qla2xxx/qla_inline.h |  16
-rw-r--r--   drivers/scsi/qla2xxx/qla_iocb.c   | 678
-rw-r--r--   drivers/scsi/qla2xxx/qla_isr.c    |  77
-rw-r--r--   drivers/scsi/qla2xxx/qla_os.c     |  75
10 files changed, 1053 insertions, 12 deletions
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5c98b097cfda..62a22cfae20e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1726,6 +1726,22 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1726 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); | 1726 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); |
1727 | } | 1727 | } |
1728 | 1728 | ||
1729 | if (IS_QLA25XX(ha) && ql2xenabledif) { | ||
1730 | if (ha->fw_attributes & BIT_4) { | ||
1731 | vha->flags.difdix_supported = 1; | ||
1732 | DEBUG18(qla_printk(KERN_INFO, ha, | ||
1733 | "Registering for DIF/DIX type 1 and 3" | ||
1734 | " protection.\n")); | ||
1735 | scsi_host_set_prot(vha->host, | ||
1736 | SHOST_DIF_TYPE1_PROTECTION | ||
1737 | | SHOST_DIF_TYPE3_PROTECTION | ||
1738 | | SHOST_DIX_TYPE1_PROTECTION | ||
1739 | | SHOST_DIX_TYPE3_PROTECTION); | ||
1740 | scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC); | ||
1741 | } else | ||
1742 | vha->flags.difdix_supported = 0; | ||
1743 | } | ||
1744 | |||
1729 | if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, | 1745 | if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, |
1730 | &ha->pdev->dev)) { | 1746 | &ha->pdev->dev)) { |
1731 | DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", | 1747 | DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 89bfc119010b..2afc8a362f2c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1663,4 +1663,62 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
1663 | printk("\n"); | 1663 | printk("\n"); |
1664 | } | 1664 | } |
1665 | 1665 | ||
1666 | void | ||
1667 | qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size) | ||
1668 | { | ||
1669 | uint32_t cnt; | ||
1670 | uint8_t c; | ||
1671 | uint8_t last16[16], cur16[16]; | ||
1672 | uint32_t lc = 0, num_same16 = 0, j; | ||
1673 | |||
1674 | printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 " | ||
1675 | "Ah Bh Ch Dh Eh Fh\n"); | ||
1676 | printk(KERN_DEBUG "----------------------------------------" | ||
1677 | "----------------------\n"); | ||
1678 | |||
1679 | for (cnt = 0; cnt < size;) { | ||
1680 | c = *b++; | ||
1666 | 1681 | ||
1682 | cur16[lc++] = c; | ||
1683 | |||
1684 | cnt++; | ||
1685 | if (cnt % 16) | ||
1686 | continue; | ||
1687 | |||
1688 | /* We have a full 16-byte line now */ | ||
1689 | lc = 0; | ||
1690 | if (num_same16 == 0) { | ||
1691 | memcpy(last16, cur16, 16); | ||
1692 | num_same16++; | ||
1693 | continue; | ||
1694 | } | ||
1695 | if (memcmp(cur16, last16, 16) == 0) { | ||
1696 | num_same16++; | ||
1697 | continue; | ||
1698 | } | ||
1699 | for (j = 0; j < 16; j++) | ||
1700 | printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]); | ||
1701 | printk(KERN_DEBUG "\n"); | ||
1702 | |||
1703 | if (num_same16 > 1) | ||
1704 | printk(KERN_DEBUG "> prev pattern repeats (%u)" | ||
1705 | " more times\n", num_same16-1); | ||
1706 | memcpy(last16, cur16, 16); | ||
1707 | num_same16 = 1; | ||
1708 | } | ||
1709 | |||
1710 | if (num_same16) { | ||
1711 | for (j = 0; j < 16; j++) | ||
1712 | printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]); | ||
1713 | printk(KERN_DEBUG "\n"); | ||
1714 | |||
1715 | if (num_same16 > 1) | ||
1716 | printk(KERN_DEBUG "> prev pattern repeats (%u)" | ||
1717 | " more times\n", num_same16-1); | ||
1718 | } | ||
1719 | if (lc) { | ||
1720 | for (j = 0; j < lc; j++) | ||
1721 | printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]); | ||
1722 | printk(KERN_DEBUG "\n"); | ||
1723 | } | ||
1724 | } | ||
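
The qla2x00_dump_buffer_zipped() helper added above run-length compresses a hex dump: each 16-byte line is printed once and consecutive identical lines are collapsed into a repeat count. A minimal userspace sketch of the same algorithm (illustration only, not the driver code; the buffer contents are made up):

    /* Run-length hex dump sketch: print a 16-byte line once, then collapse
     * consecutive identical lines into a repeat count. */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static void print16(const uint8_t *p, uint32_t n)
    {
        for (uint32_t j = 0; j < n; j++)
            printf("%02x ", p[j]);
        printf("\n");
    }

    static void dump_zipped(const uint8_t *b, uint32_t size)
    {
        uint8_t last[16];
        uint32_t repeats = 0;

        for (uint32_t off = 0; off + 16 <= size; off += 16) {
            if (repeats && memcmp(b + off, last, 16) == 0) {
                repeats++;
                continue;
            }
            if (repeats) {
                print16(last, 16);
                if (repeats > 1)
                    printf("> prev pattern repeats (%u) more times\n",
                        repeats - 1);
            }
            memcpy(last, b + off, 16);
            repeats = 1;
        }
        if (repeats) {
            print16(last, 16);
            if (repeats > 1)
                printf("> prev pattern repeats (%u) more times\n", repeats - 1);
        }
        if (size % 16)                      /* leftover partial line */
            print16(b + (size & ~15u), size % 16);
    }

    int main(void)
    {
        uint8_t buf[64] = { 0 };            /* three identical lines, one different */

        buf[48] = 0xde;
        buf[49] = 0xad;
        dump_zipped(buf, sizeof(buf));
        return 0;
    }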
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index d6d9c86cb058..916c81f3f55d 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -27,6 +27,9 @@
27 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ | 27 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ |
28 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ | 28 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ |
29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ | 29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ |
30 | /* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ | ||
31 | |||
32 | /* #define QL_PRINTK_BUF */ /* Captures printk to buffer */ | ||
30 | 33 | ||
31 | /* | 34 | /* |
32 | * Macros use for debugging the driver. | 35 | * Macros use for debugging the driver. |
@@ -139,6 +142,13 @@
139 | #define DEBUG17(x) do {} while (0) | 142 | #define DEBUG17(x) do {} while (0) |
140 | #endif | 143 | #endif |
141 | 144 | ||
145 | #if defined(QL_DEBUG_LEVEL_18) | ||
146 | #define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0) | ||
147 | #else | ||
148 | #define DEBUG18(x) do {} while (0) | ||
149 | #endif | ||
150 | |||
151 | |||
142 | /* | 152 | /* |
143 | * Firmware Dump structure definition | 153 | * Firmware Dump structure definition |
144 | */ | 154 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 0d2cecbb8f47..4559f5c6c1ae 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -189,6 +189,16 @@
189 | struct req_que; | 189 | struct req_que; |
190 | 190 | ||
191 | /* | 191 | /* |
192 | * (sd.h is not exported, hence this local copy of the definition) | ||
193 | * Data Integrity Field tuple. | ||
194 | */ | ||
195 | struct sd_dif_tuple { | ||
196 | __be16 guard_tag; /* Checksum */ | ||
197 | __be16 app_tag; /* Opaque storage */ | ||
198 | __be32 ref_tag; /* Target LBA or indirect LBA */ | ||
199 | }; | ||
200 | |||
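
The struct above is a local copy of the block layer's 8-byte T10 DIF tuple, one of which accompanies each logical block (commonly 512 data bytes followed by the tuple). A small userspace sketch of the layout, with the kernel's __be16/__be32 types replaced by plain integers and the Type 1 convention that the ref tag carries the low 32 bits of the LBA; illustration only:

    /* 8-byte T10 DIF tuple, one per logical block; big-endian on the wire. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    struct dif_tuple {
        uint16_t guard_tag;     /* CRC over the data block */
        uint16_t app_tag;       /* opaque application tag */
        uint32_t ref_tag;       /* Type 1: low 32 bits of the LBA */
    };

    int main(void)
    {
        uint64_t lba = 0x123456789ULL;

        assert(sizeof(struct dif_tuple) == 8);
        assert(offsetof(struct dif_tuple, ref_tag) == 4);

        /* Type 1 carries only the low 32 bits of the target LBA. */
        printf("ref tag for LBA 0x%llx = 0x%08x\n",
            (unsigned long long)lba, (uint32_t)(lba & 0xffffffff));
        return 0;
    }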
201 | /* | ||
192 | * SCSI Request Block | 202 | * SCSI Request Block |
193 | */ | 203 | */ |
194 | typedef struct srb { | 204 | typedef struct srb { |
@@ -208,8 +218,14 @@ typedef struct srb {
208 | /* | 218 | /* |
209 | * SRB flag definitions | 219 | * SRB flag definitions |
210 | */ | 220 | */ |
211 | #define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ | 221 | #define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ |
212 | #define SRB_FCP_CMND_DMA_VALID BIT_12 /* FCP command in IOCB */ | 222 | #define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */ |
223 | #define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */ | ||
224 | #define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ | ||
225 | #define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ | ||
226 | |||
227 | /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ | ||
228 | #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) | ||
213 | 229 | ||
214 | /* | 230 | /* |
215 | * SRB extensions. | 231 | * SRB extensions. |
@@ -1330,6 +1346,66 @@ typedef struct {
1330 | uint32_t dseg_4_length; /* Data segment 4 length. */ | 1346 | uint32_t dseg_4_length; /* Data segment 4 length. */ |
1331 | } cont_a64_entry_t; | 1347 | } cont_a64_entry_t; |
1332 | 1348 | ||
1349 | #define PO_MODE_DIF_INSERT 0 | ||
1350 | #define PO_MODE_DIF_REMOVE BIT_0 | ||
1351 | #define PO_MODE_DIF_PASS BIT_1 | ||
1352 | #define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1) | ||
1353 | #define PO_ENABLE_DIF_BUNDLING BIT_8 | ||
1354 | #define PO_ENABLE_INCR_GUARD_SEED BIT_3 | ||
1355 | #define PO_DISABLE_INCR_REF_TAG BIT_5 | ||
1356 | #define PO_DISABLE_GUARD_CHECK BIT_4 | ||
1357 | /* | ||
1358 | * ISP queue - 64-Bit addressing, continuation crc entry structure definition. | ||
1359 | */ | ||
1360 | struct crc_context { | ||
1361 | uint32_t handle; /* System handle. */ | ||
1362 | uint32_t ref_tag; | ||
1363 | uint16_t app_tag; | ||
1364 | uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ | ||
1365 | uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ | ||
1366 | uint16_t guard_seed; /* Initial Guard Seed */ | ||
1367 | uint16_t prot_opts; /* Requested Data Protection Mode */ | ||
1368 | uint16_t blk_size; /* Data size in bytes */ | ||
1369 | uint16_t runt_blk_guard; /* Guard value for runt block (tape | ||
1370 | * only) */ | ||
1371 | uint32_t byte_count; /* Total byte count/ total data | ||
1372 | * transfer count */ | ||
1373 | union { | ||
1374 | struct { | ||
1375 | uint32_t reserved_1; | ||
1376 | uint16_t reserved_2; | ||
1377 | uint16_t reserved_3; | ||
1378 | uint32_t reserved_4; | ||
1379 | uint32_t data_address[2]; | ||
1380 | uint32_t data_length; | ||
1381 | uint32_t reserved_5[2]; | ||
1382 | uint32_t reserved_6; | ||
1383 | } nobundling; | ||
1384 | struct { | ||
1385 | uint32_t dif_byte_count; /* Total DIF byte | ||
1386 | * count */ | ||
1387 | uint16_t reserved_1; | ||
1388 | uint16_t dseg_count; /* Data segment count */ | ||
1389 | uint32_t reserved_2; | ||
1390 | uint32_t data_address[2]; | ||
1391 | uint32_t data_length; | ||
1392 | uint32_t dif_address[2]; | ||
1393 | uint32_t dif_length; /* Data segment 0 | ||
1394 | * length */ | ||
1395 | } bundling; | ||
1396 | } u; | ||
1397 | |||
1398 | struct fcp_cmnd fcp_cmnd; | ||
1399 | dma_addr_t crc_ctx_dma; | ||
1400 | /* List of DMA context transfers */ | ||
1401 | struct list_head dsd_list; | ||
1402 | |||
1403 | /* This structure should not exceed 512 bytes */ | ||
1404 | }; | ||
1405 | |||
1406 | #define CRC_CONTEXT_LEN_FW (offsetof(struct crc_context, fcp_cmnd.lun)) | ||
1407 | #define CRC_CONTEXT_FCPCMND_OFF (offsetof(struct crc_context, fcp_cmnd.lun)) | ||
1408 | |||
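
Both CRC_CONTEXT_LEN_FW and CRC_CONTEXT_FCPCMND_OFF expand to the offset of fcp_cmnd.lun: since lun is the first member of the embedded fcp_cmnd, that offset is both where the firmware-visible part of the context ends and where the FCP command begins, while crc_ctx_dma and dsd_list behind it stay driver-private. A sketch of that offsetof identity using simplified stand-in structs (hypothetical layout, not the real firmware structures):

    /* Simplified stand-ins only: demonstrates that the offset of the embedded
     * command equals the offset of its first member, lun. */
    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fake_fcp_cmnd {
        uint8_t lun[8];                     /* first member, as in the driver */
        uint8_t task_attribute;
        uint8_t cdb[16];
    };

    struct fake_crc_context {
        uint32_t handle;
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t  ref_tag_mask[4];
        uint8_t  app_tag_mask[2];
        /* ... remaining firmware-visible fields elided ... */
        struct fake_fcp_cmnd fcp_cmnd;
        void *crc_ctx_dma;                  /* driver-private, never sent to firmware */
    };

    int main(void)
    {
        size_t len_fw   = offsetof(struct fake_crc_context, fcp_cmnd.lun);
        size_t cmnd_off = offsetof(struct fake_crc_context, fcp_cmnd);

        printf("firmware-visible length %zu == fcp_cmnd offset %zu\n",
            len_fw, cmnd_off);
        return 0;
    }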
1333 | /* | 1409 | /* |
1334 | * ISP queue - status entry structure definition. | 1410 | * ISP queue - status entry structure definition. |
1335 | */ | 1411 | */ |
@@ -1390,6 +1466,7 @@ typedef struct {
1390 | #define CS_ABORTED 0x5 /* System aborted command. */ | 1466 | #define CS_ABORTED 0x5 /* System aborted command. */ |
1391 | #define CS_TIMEOUT 0x6 /* Timeout error. */ | 1467 | #define CS_TIMEOUT 0x6 /* Timeout error. */ |
1392 | #define CS_DATA_OVERRUN 0x7 /* Data overrun. */ | 1468 | #define CS_DATA_OVERRUN 0x7 /* Data overrun. */ |
1469 | #define CS_DIF_ERROR 0xC /* DIF error detected */ | ||
1393 | 1470 | ||
1394 | #define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */ | 1471 | #define CS_DATA_UNDERRUN 0x15 /* Data Underrun. */ |
1395 | #define CS_QUEUE_FULL 0x1C /* Queue Full. */ | 1472 | #define CS_QUEUE_FULL 0x1C /* Queue Full. */ |
@@ -2732,6 +2809,7 @@ typedef struct scsi_qla_host {
2732 | 2809 | ||
2733 | uint32_t management_server_logged_in :1; | 2810 | uint32_t management_server_logged_in :1; |
2734 | uint32_t process_response_queue :1; | 2811 | uint32_t process_response_queue :1; |
2812 | uint32_t difdix_supported:1; | ||
2735 | } flags; | 2813 | } flags; |
2736 | 2814 | ||
2737 | atomic_t loop_state; | 2815 | atomic_t loop_state; |
@@ -2883,6 +2961,8 @@ typedef struct scsi_qla_host {
2883 | #define OPTROM_BURST_SIZE 0x1000 | 2961 | #define OPTROM_BURST_SIZE 0x1000 |
2884 | #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) | 2962 | #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) |
2885 | 2963 | ||
2964 | #define QLA_DSDS_PER_IOCB 37 | ||
2965 | |||
2886 | #include "qla_gbl.h" | 2966 | #include "qla_gbl.h" |
2887 | #include "qla_dbg.h" | 2967 | #include "qla_dbg.h" |
2888 | #include "qla_inline.h" | 2968 | #include "qla_inline.h" |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index a77a2471eaff..93f833960147 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -400,6 +400,7 @@ struct cmd_type_6 {
400 | struct scsi_lun lun; /* FCP LUN (BE). */ | 400 | struct scsi_lun lun; /* FCP LUN (BE). */ |
401 | 401 | ||
402 | uint16_t control_flags; /* Control flags. */ | 402 | uint16_t control_flags; /* Control flags. */ |
403 | #define CF_DIF_SEG_DESCR_ENABLE BIT_3 | ||
403 | #define CF_DATA_SEG_DESCR_ENABLE BIT_2 | 404 | #define CF_DATA_SEG_DESCR_ENABLE BIT_2 |
404 | #define CF_READ_DATA BIT_1 | 405 | #define CF_READ_DATA BIT_1 |
405 | #define CF_WRITE_DATA BIT_0 | 406 | #define CF_WRITE_DATA BIT_0 |
@@ -466,6 +467,43 @@ struct cmd_type_7 {
466 | uint32_t dseg_0_len; /* Data segment 0 length. */ | 467 | uint32_t dseg_0_len; /* Data segment 0 length. */ |
467 | }; | 468 | }; |
468 | 469 | ||
470 | #define COMMAND_TYPE_CRC_2 0x6A /* Command Type CRC_2 (Type 6) | ||
471 | * (T10-DIF) */ | ||
472 | struct cmd_type_crc_2 { | ||
473 | uint8_t entry_type; /* Entry type. */ | ||
474 | uint8_t entry_count; /* Entry count. */ | ||
475 | uint8_t sys_define; /* System defined. */ | ||
476 | uint8_t entry_status; /* Entry Status. */ | ||
477 | |||
478 | uint32_t handle; /* System handle. */ | ||
479 | |||
480 | uint16_t nport_handle; /* N_PORT handle. */ | ||
481 | uint16_t timeout; /* Command timeout. */ | ||
482 | |||
483 | uint16_t dseg_count; /* Data segment count. */ | ||
484 | |||
485 | uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */ | ||
486 | |||
487 | struct scsi_lun lun; /* FCP LUN (BE). */ | ||
488 | |||
489 | uint16_t control_flags; /* Control flags. */ | ||
490 | |||
491 | uint16_t fcp_cmnd_dseg_len; /* Data segment length. */ | ||
492 | uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */ | ||
493 | |||
494 | uint32_t fcp_rsp_dseg_address[2]; /* Data segment address. */ | ||
495 | |||
496 | uint32_t byte_count; /* Total byte count. */ | ||
497 | |||
498 | uint8_t port_id[3]; /* PortID of destination port. */ | ||
499 | uint8_t vp_index; | ||
500 | |||
501 | uint32_t crc_context_address[2]; /* Data segment address. */ | ||
502 | uint16_t crc_context_len; /* Data segment length. */ | ||
503 | uint16_t reserved_1; /* MUST be set to 0. */ | ||
504 | }; | ||
505 | |||
506 | |||
469 | /* | 507 | /* |
470 | * ISP queue - status entry structure definition. | 508 | * ISP queue - status entry structure definition. |
471 | */ | 509 | */ |
@@ -496,10 +534,17 @@ struct sts_entry_24xx {
496 | 534 | ||
497 | uint32_t sense_len; /* FCP SENSE length. */ | 535 | uint32_t sense_len; /* FCP SENSE length. */ |
498 | uint32_t rsp_data_len; /* FCP response data length. */ | 536 | uint32_t rsp_data_len; /* FCP response data length. */ |
499 | |||
500 | uint8_t data[28]; /* FCP response/sense information. */ | 537 | uint8_t data[28]; /* FCP response/sense information. */ |
538 | /* | ||
539 | * If DIF Error is set in comp_status, these additional fields are | ||
540 | * defined: | ||
541 | * &data[10] : uint8_t report_runt_bg[2]; - computed guard | ||
542 | * &data[12] : uint8_t actual_dif[8]; - DIF Data received | ||
543 | * &data[20] : uint8_t expected_dif[8]; - DIF Data computed | ||
544 | */ | ||
501 | }; | 545 | }; |
502 | 546 | ||
547 | |||
503 | /* | 548 | /* |
504 | * Status entry completion status | 549 | * Status entry completion status |
505 | */ | 550 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3dbefe1a6b5f..3e946da74b96 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -94,6 +94,8 @@ extern int ql2xshiftctondsd;
94 | extern int ql2xdbwr; | 94 | extern int ql2xdbwr; |
95 | extern int ql2xdontresethba; | 95 | extern int ql2xdontresethba; |
96 | extern int ql2xasynctmfenable; | 96 | extern int ql2xasynctmfenable; |
97 | extern int ql2xenabledif; | ||
98 | extern int ql2xenablehba_err_chk; | ||
97 | 99 | ||
98 | extern int qla2x00_loop_reset(scsi_qla_host_t *); | 100 | extern int qla2x00_loop_reset(scsi_qla_host_t *); |
99 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 101 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
@@ -179,6 +181,7 @@ extern int qla2x00_start_sp(srb_t *);
179 | extern void qla2x00_ctx_sp_free(srb_t *); | 181 | extern void qla2x00_ctx_sp_free(srb_t *); |
180 | extern uint16_t qla24xx_calc_iocbs(uint16_t); | 182 | extern uint16_t qla24xx_calc_iocbs(uint16_t); |
181 | extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); | 183 | extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); |
184 | extern int qla24xx_dif_start_scsi(srb_t *); | ||
182 | 185 | ||
183 | 186 | ||
184 | /* | 187 | /* |
@@ -423,6 +426,7 @@ extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
423 | extern void qla81xx_fw_dump(scsi_qla_host_t *, int); | 426 | extern void qla81xx_fw_dump(scsi_qla_host_t *, int); |
424 | extern void qla2x00_dump_regs(scsi_qla_host_t *); | 427 | extern void qla2x00_dump_regs(scsi_qla_host_t *); |
425 | extern void qla2x00_dump_buffer(uint8_t *, uint32_t); | 428 | extern void qla2x00_dump_buffer(uint8_t *, uint32_t); |
429 | extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); | ||
426 | 430 | ||
427 | /* | 431 | /* |
428 | * Global Function Prototypes in qla_gs.c source file. | 432 | * Global Function Prototypes in qla_gs.c source file. |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index ad53c6455556..84c2fea154d2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -67,3 +67,19 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
67 | return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || | 67 | return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) || |
68 | loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST); | 68 | loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST); |
69 | } | 69 | } |
70 | |||
71 | static inline void | ||
72 | qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) | ||
73 | { | ||
74 | struct dsd_dma *dsd_ptr, *tdsd_ptr; | ||
75 | |||
76 | /* clean up the previously allocated pool entries */ | ||
77 | list_for_each_entry_safe(dsd_ptr, tdsd_ptr, | ||
78 | &((struct crc_context *)sp->ctx)->dsd_list, list) { | ||
79 | dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, | ||
80 | dsd_ptr->dsd_list_dma); | ||
81 | list_del(&dsd_ptr->list); | ||
82 | kfree(dsd_ptr); | ||
83 | } | ||
84 | INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); | ||
85 | } | ||
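
The cleanup helper above walks the DSD list with list_for_each_entry_safe() because every node is freed during the walk. A plain-C analogue (a hand-rolled singly linked list, not the kernel list API) of why the next pointer has to be captured before the current node is released:

    /* Freeing while iterating: read the successor before releasing the node. */
    #include <stdio.h>
    #include <stdlib.h>

    struct dsd_node {
        struct dsd_node *next;
        int id;
    };

    static void free_all(struct dsd_node **head)
    {
        struct dsd_node *cur = *head, *next;

        while (cur) {
            next = cur->next;               /* grab the successor first ... */
            printf("freeing dsd %d\n", cur->id);
            free(cur);                      /* ... then the node can go away */
            cur = next;
        }
        *head = NULL;                       /* leave an empty, reusable list */
    }

    int main(void)
    {
        struct dsd_node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct dsd_node *n = malloc(sizeof(*n));

            if (!n)
                return 1;
            n->id = i;
            n->next = head;
            head = n;
        }
        free_all(&head);
        return 0;
    }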
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d7a9fff15ad5..8ef945365412 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -145,7 +145,49 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
145 | return (cont_pkt); | 145 | return (cont_pkt); |
146 | } | 146 | } |
147 | 147 | ||
148 | /** | 148 | static inline int |
149 | qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) | ||
150 | { | ||
151 | uint8_t guard = scsi_host_get_guard(sp->cmd->device->host); | ||
152 | |||
153 | /* We only support T10 DIF right now */ | ||
154 | if (guard != SHOST_DIX_GUARD_CRC) { | ||
155 | DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard)); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | /* We always use DIF bundling for best performance */ | ||
160 | *fw_prot_opts = 0; | ||
161 | |||
162 | /* Translate SCSI opcode to a protection opcode */ | ||
163 | switch (scsi_get_prot_op(sp->cmd)) { | ||
164 | case SCSI_PROT_READ_STRIP: | ||
165 | *fw_prot_opts |= PO_MODE_DIF_REMOVE; | ||
166 | break; | ||
167 | case SCSI_PROT_WRITE_INSERT: | ||
168 | *fw_prot_opts |= PO_MODE_DIF_INSERT; | ||
169 | break; | ||
170 | case SCSI_PROT_READ_INSERT: | ||
171 | *fw_prot_opts |= PO_MODE_DIF_INSERT; | ||
172 | break; | ||
173 | case SCSI_PROT_WRITE_STRIP: | ||
174 | *fw_prot_opts |= PO_MODE_DIF_REMOVE; | ||
175 | break; | ||
176 | case SCSI_PROT_READ_PASS: | ||
177 | *fw_prot_opts |= PO_MODE_DIF_PASS; | ||
178 | break; | ||
179 | case SCSI_PROT_WRITE_PASS: | ||
180 | *fw_prot_opts |= PO_MODE_DIF_PASS; | ||
181 | break; | ||
182 | default: /* Normal Request */ | ||
183 | *fw_prot_opts |= PO_MODE_DIF_PASS; | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | return scsi_prot_sg_count(sp->cmd); | ||
188 | } | ||
189 | |||
190 | /* | ||
149 | * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit | 191 | * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit |
150 | * capable IOCB types. | 192 | * capable IOCB types. |
151 | * | 193 | * |
@@ -636,6 +678,8 @@ qla24xx_calc_iocbs(uint16_t dsds)
636 | if ((dsds - 1) % 5) | 678 | if ((dsds - 1) % 5) |
637 | iocbs++; | 679 | iocbs++; |
638 | } | 680 | } |
681 | DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n", | ||
682 | __func__, iocbs)); | ||
639 | return iocbs; | 683 | return iocbs; |
640 | } | 684 | } |
641 | 685 | ||
@@ -716,6 +760,453 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
716 | } | 760 | } |
717 | } | 761 | } |
718 | 762 | ||
763 | struct fw_dif_context { | ||
764 | uint32_t ref_tag; | ||
765 | uint16_t app_tag; | ||
766 | uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ | ||
767 | uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ | ||
768 | }; | ||
769 | |||
770 | /* | ||
771 | * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command | ||
772 | * | ||
773 | */ | ||
774 | static inline void | ||
775 | qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt, | ||
776 | unsigned int protcnt) | ||
777 | { | ||
778 | struct sd_dif_tuple *spt; | ||
779 | unsigned char op = scsi_get_prot_op(cmd); | ||
780 | |||
781 | switch (scsi_get_prot_type(cmd)) { | ||
782 | /* For TYPE 0 protection: no checking */ | ||
783 | case SCSI_PROT_DIF_TYPE0: | ||
784 | pkt->ref_tag_mask[0] = 0x00; | ||
785 | pkt->ref_tag_mask[1] = 0x00; | ||
786 | pkt->ref_tag_mask[2] = 0x00; | ||
787 | pkt->ref_tag_mask[3] = 0x00; | ||
788 | break; | ||
789 | |||
790 | /* | ||
791 | * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to | ||
792 | * match LBA in CDB + N | ||
793 | */ | ||
794 | case SCSI_PROT_DIF_TYPE2: | ||
795 | break; | ||
796 | |||
797 | /* For Type 3 protection: 16 bit GUARD only */ | ||
798 | case SCSI_PROT_DIF_TYPE3: | ||
799 | pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] = | ||
800 | pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = | ||
801 | 0x00; | ||
802 | break; | ||
803 | |||
804 | /* | ||
805 | * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and | ||
806 | * 16 bit app tag. | ||
807 | */ | ||
808 | case SCSI_PROT_DIF_TYPE1: | ||
809 | if (!ql2xenablehba_err_chk) | ||
810 | break; | ||
811 | |||
812 | if (protcnt && (op == SCSI_PROT_WRITE_STRIP || | ||
813 | op == SCSI_PROT_WRITE_PASS)) { | ||
814 | spt = page_address(sg_page(scsi_prot_sglist(cmd))) + | ||
815 | scsi_prot_sglist(cmd)[0].offset; | ||
816 | DEBUG18(printk(KERN_DEBUG | ||
817 | "%s(): LBA from user %p, lba = 0x%x\n", | ||
818 | __func__, spt, (int)spt->ref_tag)); | ||
819 | pkt->ref_tag = swab32(spt->ref_tag); | ||
820 | pkt->app_tag_mask[0] = 0x0; | ||
821 | pkt->app_tag_mask[1] = 0x0; | ||
822 | } else { | ||
823 | pkt->ref_tag = cpu_to_le32((uint32_t) | ||
824 | (0xffffffff & scsi_get_lba(cmd))); | ||
825 | pkt->app_tag = __constant_cpu_to_le16(0); | ||
826 | pkt->app_tag_mask[0] = 0x0; | ||
827 | pkt->app_tag_mask[1] = 0x0; | ||
828 | } | ||
829 | /* enable ALL bytes of the ref tag */ | ||
830 | pkt->ref_tag_mask[0] = 0xff; | ||
831 | pkt->ref_tag_mask[1] = 0xff; | ||
832 | pkt->ref_tag_mask[2] = 0xff; | ||
833 | pkt->ref_tag_mask[3] = 0xff; | ||
834 | break; | ||
835 | } | ||
836 | |||
837 | DEBUG18(printk(KERN_DEBUG | ||
838 | "%s(): Setting protection Tags: (BIG) ref tag = 0x%x," | ||
839 | " app tag = 0x%x, prot SG count %d , cmd lba 0x%x," | ||
840 | " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt, | ||
841 | (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd))); | ||
842 | } | ||
843 | |||
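
For Type 1 the routine above seeds the firmware ref tag either from the first tuple in the protection buffer (WRITE_STRIP/WRITE_PASS, where the block layer has already generated tuples) or from the low 32 bits of the CDB LBA. A small sketch of why the two paths agree, assuming a little-endian host where swab32() of the big-endian tuple value and cpu_to_le32() of the LBA produce the same IOCB field; example values only:

    /* Type 1 ref-tag seeding: both paths end up with the same 32-bit value
     * in the IOCB on a little-endian host.  Example values only. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t swab32(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
        uint32_t lba_low = 0x00001234;          /* low 32 bits of the LBA */

        /* What the block layer's tuple looks like when its big-endian ref tag
         * is read as a host integer on a little-endian machine. */
        uint32_t tuple_as_read = swab32(lba_low);

        /* Path 1: WRITE with a protection buffer - swap the tuple value. */
        uint32_t ref_from_tuple = swab32(tuple_as_read);

        /* Path 2: no usable tuple - take the LBA from the CDB directly
         * (cpu_to_le32() is a no-op on a little-endian host). */
        uint32_t ref_from_lba = lba_low;

        printf("ref tag 0x%08x vs 0x%08x (must match)\n",
            ref_from_tuple, ref_from_lba);
        return 0;
    }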
844 | |||
845 | static int | ||
846 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | ||
847 | uint16_t tot_dsds) | ||
848 | { | ||
849 | void *next_dsd; | ||
850 | uint8_t avail_dsds = 0; | ||
851 | uint32_t dsd_list_len; | ||
852 | struct dsd_dma *dsd_ptr; | ||
853 | struct scatterlist *sg; | ||
854 | uint32_t *cur_dsd = dsd; | ||
855 | int i; | ||
856 | uint16_t used_dsds = tot_dsds; | ||
857 | |||
858 | uint8_t *cp; | ||
859 | |||
860 | scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) { | ||
861 | dma_addr_t sle_dma; | ||
862 | |||
863 | /* Allocate additional continuation packets? */ | ||
864 | if (avail_dsds == 0) { | ||
865 | avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? | ||
866 | QLA_DSDS_PER_IOCB : used_dsds; | ||
867 | dsd_list_len = (avail_dsds + 1) * 12; | ||
868 | used_dsds -= avail_dsds; | ||
869 | |||
870 | /* allocate tracking DS */ | ||
871 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | ||
872 | if (!dsd_ptr) | ||
873 | return 1; | ||
874 | |||
875 | /* allocate new list */ | ||
876 | dsd_ptr->dsd_addr = next_dsd = | ||
877 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, | ||
878 | &dsd_ptr->dsd_list_dma); | ||
879 | |||
880 | if (!next_dsd) { | ||
881 | /* | ||
882 | * Need to cleanup only this dsd_ptr, rest | ||
883 | * will be done by sp_free_dma() | ||
884 | */ | ||
885 | kfree(dsd_ptr); | ||
886 | return 1; | ||
887 | } | ||
888 | |||
889 | list_add_tail(&dsd_ptr->list, | ||
890 | &((struct crc_context *)sp->ctx)->dsd_list); | ||
891 | |||
892 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
893 | |||
894 | /* add new list to cmd iocb or last list */ | ||
895 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
896 | *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
897 | *cur_dsd++ = dsd_list_len; | ||
898 | cur_dsd = (uint32_t *)next_dsd; | ||
899 | } | ||
900 | sle_dma = sg_dma_address(sg); | ||
901 | DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x," | ||
902 | " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma), | ||
903 | MSD(sle_dma), sg_dma_len(sg))); | ||
904 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
905 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
906 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | ||
907 | avail_dsds--; | ||
908 | |||
909 | if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { | ||
910 | cp = page_address(sg_page(sg)) + sg->offset; | ||
911 | DEBUG18(printk("%s(): User Data buffer= %p:\n", | ||
912 | __func__ , cp)); | ||
913 | } | ||
914 | } | ||
915 | /* Null termination */ | ||
916 | *cur_dsd++ = 0; | ||
917 | *cur_dsd++ = 0; | ||
918 | *cur_dsd++ = 0; | ||
919 | return 0; | ||
920 | } | ||
921 | |||
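
Each DSD written above is three 32-bit words (address low, address high, length), and every continuation list allocated from dl_dma_pool carries up to QLA_DSDS_PER_IOCB (37) DSDs plus one extra 12-byte slot used either to chain to the next list or to null-terminate, i.e. at most (37 + 1) * 12 = 456 bytes per list; the assumption here is that this fits within the driver's DSD list pool block size. A sketch of the sizing arithmetic with a made-up segment count:

    /* DSD list sizing: up to 37 data segments per list, plus one 12-byte slot
     * for chaining or null termination. */
    #include <stdio.h>

    #define QLA_DSDS_PER_IOCB 37
    #define DSD_BYTES 12                    /* 2 address words + 1 length word */

    int main(void)
    {
        unsigned int tot_dsds = 100;        /* example scatter-gather count */
        unsigned int used = tot_dsds, lists = 0;

        while (used) {
            unsigned int avail = used > QLA_DSDS_PER_IOCB ?
                                 QLA_DSDS_PER_IOCB : used;
            unsigned int list_len = (avail + 1) * DSD_BYTES;

            lists++;
            printf("list %u: %u DSDs, %u bytes\n", lists, avail, list_len);
            used -= avail;
        }
        printf("%u segments -> %u DSD list allocations\n", tot_dsds, lists);
        return 0;
    }

With 100 segments this yields three list allocations (37 + 37 + 26 DSDs).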
922 | static int | ||
923 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | ||
924 | uint32_t *dsd, | ||
925 | uint16_t tot_dsds) | ||
926 | { | ||
927 | void *next_dsd; | ||
928 | uint8_t avail_dsds = 0; | ||
929 | uint32_t dsd_list_len; | ||
930 | struct dsd_dma *dsd_ptr; | ||
931 | struct scatterlist *sg; | ||
932 | int i; | ||
933 | struct scsi_cmnd *cmd; | ||
934 | uint32_t *cur_dsd = dsd; | ||
935 | uint16_t used_dsds = tot_dsds; | ||
936 | |||
937 | uint8_t *cp; | ||
938 | |||
939 | |||
940 | cmd = sp->cmd; | ||
941 | scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { | ||
942 | dma_addr_t sle_dma; | ||
943 | |||
944 | /* Allocate additional continuation packets? */ | ||
945 | if (avail_dsds == 0) { | ||
946 | avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? | ||
947 | QLA_DSDS_PER_IOCB : used_dsds; | ||
948 | dsd_list_len = (avail_dsds + 1) * 12; | ||
949 | used_dsds -= avail_dsds; | ||
950 | |||
951 | /* allocate tracking DS */ | ||
952 | dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); | ||
953 | if (!dsd_ptr) | ||
954 | return 1; | ||
955 | |||
956 | /* allocate new list */ | ||
957 | dsd_ptr->dsd_addr = next_dsd = | ||
958 | dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, | ||
959 | &dsd_ptr->dsd_list_dma); | ||
960 | |||
961 | if (!next_dsd) { | ||
962 | /* | ||
963 | * Need to cleanup only this dsd_ptr, rest | ||
964 | * will be done by sp_free_dma() | ||
965 | */ | ||
966 | kfree(dsd_ptr); | ||
967 | return 1; | ||
968 | } | ||
969 | |||
970 | list_add_tail(&dsd_ptr->list, | ||
971 | &((struct crc_context *)sp->ctx)->dsd_list); | ||
972 | |||
973 | sp->flags |= SRB_CRC_CTX_DSD_VALID; | ||
974 | |||
975 | /* add new list to cmd iocb or last list */ | ||
976 | *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); | ||
977 | *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); | ||
978 | *cur_dsd++ = dsd_list_len; | ||
979 | cur_dsd = (uint32_t *)next_dsd; | ||
980 | } | ||
981 | sle_dma = sg_dma_address(sg); | ||
982 | if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { | ||
983 | DEBUG18(printk(KERN_DEBUG | ||
984 | "%s(): %p, sg entry %d - addr =0x%x" | ||
985 | "0x%x, len =%d\n", __func__ , cur_dsd, i, | ||
986 | LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg))); | ||
987 | } | ||
988 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | ||
989 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | ||
990 | *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); | ||
991 | |||
992 | if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { | ||
993 | cp = page_address(sg_page(sg)) + sg->offset; | ||
994 | DEBUG18(printk("%s(): Protection Data buffer = %p:\n", | ||
995 | __func__ , cp)); | ||
996 | } | ||
997 | avail_dsds--; | ||
998 | } | ||
999 | /* Null termination */ | ||
1000 | *cur_dsd++ = 0; | ||
1001 | *cur_dsd++ = 0; | ||
1002 | *cur_dsd++ = 0; | ||
1003 | return 0; | ||
1004 | } | ||
1005 | |||
1006 | /** | ||
1007 | * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command | ||
1008 | * Type 6 IOCB types. | ||
1009 | * | ||
1010 | * @sp: SRB command to process | ||
1011 | * @cmd_pkt: Command Type CRC_2 IOCB | ||
1012 | * @tot_dsds: Total number of segments to transfer | ||
1013 | */ | ||
1014 | static inline int | ||
1015 | qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, | ||
1016 | uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts) | ||
1017 | { | ||
1018 | uint32_t *cur_dsd, *fcp_dl; | ||
1019 | scsi_qla_host_t *vha; | ||
1020 | struct scsi_cmnd *cmd; | ||
1021 | struct scatterlist *cur_seg; | ||
1022 | int sgc; | ||
1023 | uint32_t total_bytes; | ||
1024 | uint32_t data_bytes; | ||
1025 | uint32_t dif_bytes; | ||
1026 | uint8_t bundling = 1; | ||
1027 | uint16_t blk_size; | ||
1028 | uint8_t *clr_ptr; | ||
1029 | struct crc_context *crc_ctx_pkt = NULL; | ||
1030 | struct qla_hw_data *ha; | ||
1031 | uint8_t additional_fcpcdb_len; | ||
1032 | uint16_t fcp_cmnd_len; | ||
1033 | struct fcp_cmnd *fcp_cmnd; | ||
1034 | dma_addr_t crc_ctx_dma; | ||
1035 | |||
1036 | cmd = sp->cmd; | ||
1037 | |||
1038 | sgc = 0; | ||
1039 | /* Update entry type to indicate Command Type CRC_2 IOCB */ | ||
1040 | *((uint32_t *)(&cmd_pkt->entry_type)) = | ||
1041 | __constant_cpu_to_le32(COMMAND_TYPE_CRC_2); | ||
1042 | |||
1043 | /* No data transfer */ | ||
1044 | data_bytes = scsi_bufflen(cmd); | ||
1045 | if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { | ||
1046 | DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n", | ||
1047 | __func__, data_bytes)); | ||
1048 | cmd_pkt->byte_count = __constant_cpu_to_le32(0); | ||
1049 | return QLA_SUCCESS; | ||
1050 | } | ||
1051 | |||
1052 | vha = sp->fcport->vha; | ||
1053 | ha = vha->hw; | ||
1054 | |||
1055 | DEBUG18(printk(KERN_DEBUG | ||
1056 | "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__, | ||
1057 | vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd))); | ||
1058 | |||
1059 | cmd_pkt->vp_index = sp->fcport->vp_idx; | ||
1060 | |||
1061 | /* Set transfer direction */ | ||
1062 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | ||
1063 | cmd_pkt->control_flags = | ||
1064 | __constant_cpu_to_le16(CF_WRITE_DATA); | ||
1065 | } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { | ||
1066 | cmd_pkt->control_flags = | ||
1067 | __constant_cpu_to_le16(CF_READ_DATA); | ||
1068 | } | ||
1069 | |||
1070 | tot_prot_dsds = scsi_prot_sg_count(cmd); | ||
1071 | if (!tot_prot_dsds) | ||
1072 | bundling = 0; | ||
1073 | |||
1074 | /* Allocate CRC context from global pool */ | ||
1075 | crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool, | ||
1076 | GFP_ATOMIC, &crc_ctx_dma); | ||
1077 | |||
1078 | if (!crc_ctx_pkt) | ||
1079 | goto crc_queuing_error; | ||
1080 | |||
1081 | /* Zero out CTX area. */ | ||
1082 | clr_ptr = (uint8_t *)crc_ctx_pkt; | ||
1083 | memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); | ||
1084 | |||
1085 | crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; | ||
1086 | |||
1087 | sp->flags |= SRB_CRC_CTX_DMA_VALID; | ||
1088 | |||
1089 | /* Set handle */ | ||
1090 | crc_ctx_pkt->handle = cmd_pkt->handle; | ||
1091 | |||
1092 | INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); | ||
1093 | |||
1094 | qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *) | ||
1095 | &crc_ctx_pkt->ref_tag, tot_prot_dsds); | ||
1096 | |||
1097 | cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); | ||
1098 | cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); | ||
1099 | cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW; | ||
1100 | |||
1101 | /* Determine SCSI command length -- align to 4 byte boundary */ | ||
1102 | if (cmd->cmd_len > 16) { | ||
1103 | DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n", | ||
1104 | __func__)); | ||
1105 | additional_fcpcdb_len = cmd->cmd_len - 16; | ||
1106 | if ((cmd->cmd_len % 4) != 0) { | ||
1107 | /* SCSI cmd > 16 bytes must be multiple of 4 */ | ||
1108 | goto crc_queuing_error; | ||
1109 | } | ||
1110 | fcp_cmnd_len = 12 + cmd->cmd_len + 4; | ||
1111 | } else { | ||
1112 | additional_fcpcdb_len = 0; | ||
1113 | fcp_cmnd_len = 12 + 16 + 4; | ||
1114 | } | ||
1115 | |||
1116 | fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; | ||
1117 | |||
1118 | fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; | ||
1119 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
1120 | fcp_cmnd->additional_cdb_len |= 1; | ||
1121 | else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | ||
1122 | fcp_cmnd->additional_cdb_len |= 2; | ||
1123 | |||
1124 | int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); | ||
1125 | memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | ||
1126 | cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); | ||
1127 | cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( | ||
1128 | LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); | ||
1129 | cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( | ||
1130 | MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); | ||
1131 | fcp_cmnd->task_attribute = 0; | ||
1132 | fcp_cmnd->task_managment = 0; | ||
1133 | |||
1134 | cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ | ||
1135 | |||
1136 | DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data" | ||
1137 | " entries %d, data bytes %d, Protection entries %d\n", | ||
1138 | __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds), | ||
1139 | data_bytes, tot_prot_dsds)); | ||
1140 | |||
1141 | /* Compute dif len and adjust data len to include protection */ | ||
1142 | total_bytes = data_bytes; | ||
1143 | dif_bytes = 0; | ||
1144 | blk_size = cmd->device->sector_size; | ||
1145 | if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) { | ||
1146 | dif_bytes = (data_bytes / blk_size) * 8; | ||
1147 | total_bytes += dif_bytes; | ||
1148 | } | ||
1149 | |||
1150 | if (!ql2xenablehba_err_chk) | ||
1151 | fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | ||
1152 | |||
1153 | if (!bundling) { | ||
1154 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; | ||
1155 | } else { | ||
1156 | /* | ||
1157 | * Configure Bundling if we need to fetch interleaving | ||
1158 | * protection PCI accesses | ||
1159 | */ | ||
1160 | fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; | ||
1161 | crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); | ||
1162 | crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - | ||
1163 | tot_prot_dsds); | ||
1164 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; | ||
1165 | } | ||
1166 | |||
1167 | /* Finish the common fields of CRC pkt */ | ||
1168 | crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); | ||
1169 | crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); | ||
1170 | crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); | ||
1171 | crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); | ||
1172 | /* Fibre channel byte count */ | ||
1173 | cmd_pkt->byte_count = cpu_to_le32(total_bytes); | ||
1174 | fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + | ||
1175 | additional_fcpcdb_len); | ||
1176 | *fcp_dl = htonl(total_bytes); | ||
1177 | |||
1178 | DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes" | ||
1179 | " = 0x%x (%d), data block size = 0x%x (%d)\n", __func__, | ||
1180 | vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes, | ||
1181 | crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size)); | ||
1182 | |||
1183 | /* Walks data segments */ | ||
1184 | |||
1185 | cmd_pkt->control_flags |= | ||
1186 | __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); | ||
1187 | if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | ||
1188 | (tot_dsds - tot_prot_dsds))) | ||
1189 | goto crc_queuing_error; | ||
1190 | |||
1191 | if (bundling && tot_prot_dsds) { | ||
1192 | /* Walks dif segments */ | ||
1193 | cur_seg = scsi_prot_sglist(cmd); | ||
1194 | cmd_pkt->control_flags |= | ||
1195 | __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); | ||
1196 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | ||
1197 | if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, | ||
1198 | tot_prot_dsds)) | ||
1199 | goto crc_queuing_error; | ||
1200 | } | ||
1201 | return QLA_SUCCESS; | ||
1202 | |||
1203 | crc_queuing_error: | ||
1204 | DEBUG18(qla_printk(KERN_INFO, ha, | ||
1205 | "CMD sent FAILED crc_q error:sp = %p\n", sp)); | ||
1206 | /* Cleanup will be performed by the caller */ | ||
1207 | |||
1208 | return QLA_FUNCTION_FAILED; | ||
1209 | } | ||
719 | 1210 | ||
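
Two pieces of arithmetic in the routine above are easy to miss: for Type 1 the wire byte count grows by one 8-byte tuple per block (the value patched into FCP_DL right after the CDB), and the FCP_CMND segment length is 12 fixed bytes plus the CDB plus the 4-byte FCP_DL word, with CDBs longer than 16 bytes required to be a multiple of 4. A sketch with example values (not taken from a real command):

    /* FCP_DL and FCP_CMND length arithmetic for a Type 1 protected transfer. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t data_bytes = 64 * 1024;    /* scsi_bufflen() */
        uint16_t blk_size = 512;            /* cmd->device->sector_size */
        uint8_t cdb_len = 16;               /* CDBs > 16 bytes must be a multiple of 4 */

        uint32_t dif_bytes = (data_bytes / blk_size) * 8;   /* one tuple per block */
        uint32_t total_bytes = data_bytes + dif_bytes;      /* advertised in FCP_DL */
        unsigned int fcp_cmnd_len = 12 + cdb_len + 4;       /* header + CDB + FCP_DL */

        printf("data %u + dif %u = fcp_dl %u, fcp_cmnd segment %u bytes\n",
            data_bytes, dif_bytes, total_bytes, fcp_cmnd_len);
        return 0;
    }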
720 | /** | 1211 | /** |
721 | * qla24xx_start_scsi() - Send a SCSI command to the ISP | 1212 | * qla24xx_start_scsi() - Send a SCSI command to the ISP |
@@ -869,6 +1360,191 @@ queuing_error:
869 | return QLA_FUNCTION_FAILED; | 1360 | return QLA_FUNCTION_FAILED; |
870 | } | 1361 | } |
871 | 1362 | ||
1363 | |||
1364 | /** | ||
1365 | * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP | ||
1366 | * @sp: command to send to the ISP | ||
1367 | * | ||
1368 | * Returns non-zero if a failure occurred, else zero. | ||
1369 | */ | ||
1370 | int | ||
1371 | qla24xx_dif_start_scsi(srb_t *sp) | ||
1372 | { | ||
1373 | int nseg; | ||
1374 | unsigned long flags; | ||
1375 | uint32_t *clr_ptr; | ||
1376 | uint32_t index; | ||
1377 | uint32_t handle; | ||
1378 | uint16_t cnt; | ||
1379 | uint16_t req_cnt = 0; | ||
1380 | uint16_t tot_dsds; | ||
1381 | uint16_t tot_prot_dsds; | ||
1382 | uint16_t fw_prot_opts = 0; | ||
1383 | struct req_que *req = NULL; | ||
1384 | struct rsp_que *rsp = NULL; | ||
1385 | struct scsi_cmnd *cmd = sp->cmd; | ||
1386 | struct scsi_qla_host *vha = sp->fcport->vha; | ||
1387 | struct qla_hw_data *ha = vha->hw; | ||
1388 | struct cmd_type_crc_2 *cmd_pkt; | ||
1389 | uint32_t status = 0; | ||
1390 | |||
1391 | #define QDSS_GOT_Q_SPACE BIT_0 | ||
1392 | |||
1393 | /* Only process protection in this routine */ | ||
1394 | if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) | ||
1395 | return qla24xx_start_scsi(sp); | ||
1396 | |||
1397 | /* Setup device pointers. */ | ||
1398 | |||
1399 | qla25xx_set_que(sp, &rsp); | ||
1400 | req = vha->req; | ||
1401 | |||
1402 | /* So we know we haven't pci_map'ed anything yet */ | ||
1403 | tot_dsds = 0; | ||
1404 | |||
1405 | /* Send marker if required */ | ||
1406 | if (vha->marker_needed != 0) { | ||
1407 | if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != | ||
1408 | QLA_SUCCESS) | ||
1409 | return QLA_FUNCTION_FAILED; | ||
1410 | vha->marker_needed = 0; | ||
1411 | } | ||
1412 | |||
1413 | /* Acquire ring specific lock */ | ||
1414 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1415 | |||
1416 | /* Check for room in outstanding command list. */ | ||
1417 | handle = req->current_outstanding_cmd; | ||
1418 | for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | ||
1419 | handle++; | ||
1420 | if (handle == MAX_OUTSTANDING_COMMANDS) | ||
1421 | handle = 1; | ||
1422 | if (!req->outstanding_cmds[handle]) | ||
1423 | break; | ||
1424 | } | ||
1425 | |||
1426 | if (index == MAX_OUTSTANDING_COMMANDS) | ||
1427 | goto queuing_error; | ||
1428 | |||
1429 | /* Compute number of required data segments */ | ||
1430 | /* Map the sg table so we have an accurate count of sg entries needed */ | ||
1431 | if (scsi_sg_count(cmd)) { | ||
1432 | nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), | ||
1433 | scsi_sg_count(cmd), cmd->sc_data_direction); | ||
1434 | if (unlikely(!nseg)) | ||
1435 | goto queuing_error; | ||
1436 | else | ||
1437 | sp->flags |= SRB_DMA_VALID; | ||
1438 | } else | ||
1439 | nseg = 0; | ||
1440 | |||
1441 | /* number of required data segments */ | ||
1442 | tot_dsds = nseg; | ||
1443 | |||
1444 | /* Compute number of required protection segments */ | ||
1445 | if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { | ||
1446 | nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), | ||
1447 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); | ||
1448 | if (unlikely(!nseg)) | ||
1449 | goto queuing_error; | ||
1450 | else | ||
1451 | sp->flags |= SRB_CRC_PROT_DMA_VALID; | ||
1452 | } else { | ||
1453 | nseg = 0; | ||
1454 | } | ||
1455 | |||
1456 | req_cnt = 1; | ||
1457 | /* Total Data and protection sg segment(s) */ | ||
1458 | tot_prot_dsds = nseg; | ||
1459 | tot_dsds += nseg; | ||
1460 | if (req->cnt < (req_cnt + 2)) { | ||
1461 | cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | ||
1462 | |||
1463 | if (req->ring_index < cnt) | ||
1464 | req->cnt = cnt - req->ring_index; | ||
1465 | else | ||
1466 | req->cnt = req->length - | ||
1467 | (req->ring_index - cnt); | ||
1468 | } | ||
1469 | |||
1470 | if (req->cnt < (req_cnt + 2)) | ||
1471 | goto queuing_error; | ||
1472 | |||
1473 | status |= QDSS_GOT_Q_SPACE; | ||
1474 | |||
1475 | /* Build header part of command packet (excluding the OPCODE). */ | ||
1476 | req->current_outstanding_cmd = handle; | ||
1477 | req->outstanding_cmds[handle] = sp; | ||
1478 | sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | ||
1479 | req->cnt -= req_cnt; | ||
1480 | |||
1481 | /* Fill-in common area */ | ||
1482 | cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; | ||
1483 | cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | ||
1484 | |||
1485 | clr_ptr = (uint32_t *)cmd_pkt + 2; | ||
1486 | memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | ||
1487 | |||
1488 | /* Set NPORT-ID and LUN number*/ | ||
1489 | cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | ||
1490 | cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | ||
1491 | cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | ||
1492 | cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | ||
1493 | |||
1494 | int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | ||
1495 | host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | ||
1496 | |||
1497 | /* Total Data and protection segment(s) */ | ||
1498 | cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | ||
1499 | |||
1500 | /* Build IOCB segments and adjust for data protection segments */ | ||
1501 | if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) | ||
1502 | req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != | ||
1503 | QLA_SUCCESS) | ||
1504 | goto queuing_error; | ||
1505 | |||
1506 | cmd_pkt->entry_count = (uint8_t)req_cnt; | ||
1507 | /* Specify response queue number where completion should happen */ | ||
1508 | cmd_pkt->entry_status = (uint8_t) rsp->id; | ||
1509 | cmd_pkt->timeout = __constant_cpu_to_le16(0); | ||
1510 | wmb(); | ||
1511 | |||
1512 | /* Adjust ring index. */ | ||
1513 | req->ring_index++; | ||
1514 | if (req->ring_index == req->length) { | ||
1515 | req->ring_index = 0; | ||
1516 | req->ring_ptr = req->ring; | ||
1517 | } else | ||
1518 | req->ring_ptr++; | ||
1519 | |||
1520 | /* Set chip new ring index. */ | ||
1521 | WRT_REG_DWORD(req->req_q_in, req->ring_index); | ||
1522 | RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); | ||
1523 | |||
1524 | /* Manage unprocessed RIO/ZIO commands in response queue. */ | ||
1525 | if (vha->flags.process_response_queue && | ||
1526 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) | ||
1527 | qla24xx_process_response_queue(vha, rsp); | ||
1528 | |||
1529 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1530 | |||
1531 | return QLA_SUCCESS; | ||
1532 | |||
1533 | queuing_error: | ||
1534 | if (status & QDSS_GOT_Q_SPACE) { | ||
1535 | req->outstanding_cmds[handle] = NULL; | ||
1536 | req->cnt += req_cnt; | ||
1537 | } | ||
1538 | /* Cleanup will be performed by the caller (queuecommand) */ | ||
1539 | |||
1540 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1541 | |||
1542 | DEBUG18(qla_printk(KERN_INFO, ha, | ||
1543 | "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd))); | ||
1544 | return QLA_FUNCTION_FAILED; | ||
1545 | } | ||
1546 | |||
1547 | |||
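
The free-space check in the routine above is standard circular-ring arithmetic: the driver's ring_index is the producer, the chip's out pointer is the consumer, and the routine refuses to queue unless at least req_cnt + 2 entries are free. A sketch of that computation with made-up values:

    /* Free entries in a circular request ring, given producer and consumer. */
    #include <stdio.h>

    static unsigned int ring_free(unsigned int length, unsigned int ring_index,
                                  unsigned int out)
    {
        if (ring_index < out)
            return out - ring_index;
        return length - (ring_index - out);
    }

    int main(void)
    {
        unsigned int length = 2048;         /* entries in the request ring */

        /* Producer ahead of consumer: free space wraps past the ring end. */
        printf("free = %u\n", ring_free(length, 100, 40));

        /* Consumer ahead of producer: free space is the gap between them. */
        printf("free = %u\n", ring_free(length, 40, 100));

        /* The routine above queues only when free >= req_cnt + 2. */
        return 0;
    }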
872 | static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) | 1548 | static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) |
873 | { | 1549 | { |
874 | struct scsi_cmnd *cmd = sp->cmd; | 1550 | struct scsi_cmnd *cmd = sp->cmd; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index eed71ea1d947..be3d8bed2ecf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,6 +10,7 @@
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <scsi/scsi_tcq.h> | 11 | #include <scsi/scsi_tcq.h> |
12 | #include <scsi/scsi_bsg_fc.h> | 12 | #include <scsi/scsi_bsg_fc.h> |
13 | #include <scsi/scsi_eh.h> | ||
13 | 14 | ||
14 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); | 15 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); |
15 | static void qla2x00_process_completed_request(struct scsi_qla_host *, | 16 | static void qla2x00_process_completed_request(struct scsi_qla_host *, |
@@ -1364,6 +1365,78 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1364 | DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); | 1365 | DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); |
1365 | } | 1366 | } |
1366 | 1367 | ||
1368 | struct scsi_dif_tuple { | ||
1369 | __be16 guard; /* Checksum */ | ||
1370 | __be16 app_tag; /* APPL identifier */ | ||
1371 | __be32 ref_tag; /* Target LBA or indirect LBA */ | ||
1372 | }; | ||
1373 | |||
1374 | /* | ||
1375 | * Checks the guard or meta-data for the type of error | ||
1376 | * detected by the HBA. In case of errors, we set the | ||
1377 | * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST | ||
1378 | * to indicate to the kernel that the HBA detected an error. | ||
1379 | */ | ||
1380 | static inline void | ||
1381 | qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | ||
1382 | { | ||
1383 | struct scsi_cmnd *cmd = sp->cmd; | ||
1384 | struct scsi_dif_tuple *ep = | ||
1385 | (struct scsi_dif_tuple *)&sts24->data[20]; | ||
1386 | struct scsi_dif_tuple *ap = | ||
1387 | (struct scsi_dif_tuple *)&sts24->data[12]; | ||
1388 | uint32_t e_ref_tag, a_ref_tag; | ||
1389 | uint16_t e_app_tag, a_app_tag; | ||
1390 | uint16_t e_guard, a_guard; | ||
1391 | |||
1392 | e_ref_tag = be32_to_cpu(ep->ref_tag); | ||
1393 | a_ref_tag = be32_to_cpu(ap->ref_tag); | ||
1394 | e_app_tag = be16_to_cpu(ep->app_tag); | ||
1395 | a_app_tag = be16_to_cpu(ap->app_tag); | ||
1396 | e_guard = be16_to_cpu(ep->guard); | ||
1397 | a_guard = be16_to_cpu(ap->guard); | ||
1398 | |||
1399 | DEBUG18(printk(KERN_DEBUG | ||
1400 | "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24)); | ||
1401 | |||
1402 | DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref" | ||
1403 | " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" | ||
1404 | " tag=0x%x, act guard=0x%x, exp guard=0x%x\n", | ||
1405 | cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, | ||
1406 | a_app_tag, e_app_tag, a_guard, e_guard)); | ||
1407 | |||
1408 | |||
1409 | /* check guard */ | ||
1410 | if (e_guard != a_guard) { | ||
1411 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | ||
1412 | 0x10, 0x1); | ||
1413 | set_driver_byte(cmd, DRIVER_SENSE); | ||
1414 | set_host_byte(cmd, DID_ABORT); | ||
1415 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | ||
1416 | return; | ||
1417 | } | ||
1418 | |||
1419 | /* check appl tag */ | ||
1420 | if (e_app_tag != a_app_tag) { | ||
1421 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | ||
1422 | 0x10, 0x2); | ||
1423 | set_driver_byte(cmd, DRIVER_SENSE); | ||
1424 | set_host_byte(cmd, DID_ABORT); | ||
1425 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | ||
1426 | return; | ||
1427 | } | ||
1428 | |||
1429 | /* check ref tag */ | ||
1430 | if (e_ref_tag != a_ref_tag) { | ||
1431 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | ||
1432 | 0x10, 0x3); | ||
1433 | set_driver_byte(cmd, DRIVER_SENSE); | ||
1434 | set_host_byte(cmd, DID_ABORT); | ||
1435 | cmd->result |= SAM_STAT_CHECK_CONDITION << 1; | ||
1436 | return; | ||
1437 | } | ||
1438 | } | ||
1439 | |||
1367 | /** | 1440 | /** |
1368 | * qla2x00_status_entry() - Process a Status IOCB entry. | 1441 | * qla2x00_status_entry() - Process a Status IOCB entry. |
1369 | * @ha: SCSI driver HA context | 1442 | * @ha: SCSI driver HA context |
@@ -1630,6 +1703,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1630 | case CS_ABORTED: | 1703 | case CS_ABORTED: |
1631 | cp->result = DID_RESET << 16; | 1704 | cp->result = DID_RESET << 16; |
1632 | break; | 1705 | break; |
1706 | |||
1707 | case CS_DIF_ERROR: | ||
1708 | qla2x00_handle_dif_error(sp, sts24); | ||
1709 | break; | ||
1633 | default: | 1710 | default: |
1634 | cp->result = DID_ERROR << 16; | 1711 | cp->result = DID_ERROR << 16; |
1635 | break; | 1712 | break; |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 523d414b59af..5104aefdc7e6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,6 +92,19 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
92 | MODULE_PARM_DESC(ql2xmaxqdepth, | 92 | MODULE_PARM_DESC(ql2xmaxqdepth, |
93 | "Maximum queue depth to report for target devices."); | 93 | "Maximum queue depth to report for target devices."); |
94 | 94 | ||
95 | /* Do not change the value of this after module load */ | ||
96 | int ql2xenabledif = 1; | ||
97 | module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR); | ||
98 | MODULE_PARM_DESC(ql2xenabledif, | ||
99 | " Enable T10-CRC-DIF " | ||
100 | " Default is 1 - Enable DIF support. 0 - No DIF support"); | ||
101 | |||
102 | int ql2xenablehba_err_chk; | ||
103 | module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); | ||
104 | MODULE_PARM_DESC(ql2xenablehba_err_chk, | ||
105 | " Enable T10-CRC-DIF Error isolation by HBA" | ||
106 | " Default is 0 - Error isolation disabled, 1 - Enable it"); | ||
107 | |||
95 | int ql2xiidmaenable=1; | 108 | int ql2xiidmaenable=1; |
96 | module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); | 109 | module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); |
97 | MODULE_PARM_DESC(ql2xiidmaenable, | 110 | MODULE_PARM_DESC(ql2xiidmaenable, |
@@ -537,6 +550,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
537 | if (fcport->drport) | 550 | if (fcport->drport) |
538 | goto qc24_target_busy; | 551 | goto qc24_target_busy; |
539 | 552 | ||
553 | if (!vha->flags.difdix_supported && | ||
554 | scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { | ||
555 | DEBUG2(qla_printk(KERN_ERR, ha, | ||
556 | "DIF capability not registered, failing DIF-capable cmd: 0x%x\n", | ||
557 | cmd->cmnd[0])); | ||
558 | cmd->result = DID_NO_CONNECT << 16; | ||
559 | goto qc24_fail_command; | ||
560 | } | ||
540 | if (atomic_read(&fcport->state) != FCS_ONLINE) { | 561 | if (atomic_read(&fcport->state) != FCS_ONLINE) { |
541 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || | 562 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || |
542 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { | 563 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { |
@@ -776,7 +797,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
776 | 797 | ||
777 | if (sp == NULL) | 798 | if (sp == NULL) |
778 | continue; | 799 | continue; |
779 | if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID)) | 800 | if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) && |
801 | !IS_PROT_IO(sp)) | ||
780 | continue; | 802 | continue; |
781 | if (sp->cmd != cmd) | 803 | if (sp->cmd != cmd) |
782 | continue; | 804 | continue; |
@@ -842,7 +864,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
842 | sp = req->outstanding_cmds[cnt]; | 864 | sp = req->outstanding_cmds[cnt]; |
843 | if (!sp) | 865 | if (!sp) |
844 | continue; | 866 | continue; |
845 | if (sp->ctx) | 867 | if ((sp->ctx) && !IS_PROT_IO(sp)) |
846 | continue; | 868 | continue; |
847 | if (vha->vp_idx != sp->fcport->vha->vp_idx) | 869 | if (vha->vp_idx != sp->fcport->vha->vp_idx) |
848 | continue; | 870 | continue; |
@@ -1189,7 +1211,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1189 | if (sp) { | 1211 | if (sp) { |
1190 | req->outstanding_cmds[cnt] = NULL; | 1212 | req->outstanding_cmds[cnt] = NULL; |
1191 | if (!sp->ctx || | 1213 | if (!sp->ctx || |
1192 | (sp->flags & SRB_FCP_CMND_DMA_VALID)) { | 1214 | (sp->flags & SRB_FCP_CMND_DMA_VALID) || |
1215 | IS_PROT_IO(sp)) { | ||
1193 | sp->cmd->result = res; | 1216 | sp->cmd->result = res; |
1194 | qla2x00_sp_compl(ha, sp); | 1217 | qla2x00_sp_compl(ha, sp); |
1195 | } else { | 1218 | } else { |
@@ -1553,7 +1576,7 @@ static struct isp_operations qla25xx_isp_ops = {
1553 | .read_optrom = qla25xx_read_optrom_data, | 1576 | .read_optrom = qla25xx_read_optrom_data, |
1554 | .write_optrom = qla24xx_write_optrom_data, | 1577 | .write_optrom = qla24xx_write_optrom_data, |
1555 | .get_flash_version = qla24xx_get_flash_version, | 1578 | .get_flash_version = qla24xx_get_flash_version, |
1556 | .start_scsi = qla24xx_start_scsi, | 1579 | .start_scsi = qla24xx_dif_start_scsi, |
1557 | .abort_isp = qla2x00_abort_isp, | 1580 | .abort_isp = qla2x00_abort_isp, |
1558 | }; | 1581 | }; |
1559 | 1582 | ||
@@ -2185,6 +2208,22 @@ skip_dpc:
2185 | DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", | 2208 | DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", |
2186 | base_vha->host_no, ha)); | 2209 | base_vha->host_no, ha)); |
2187 | 2210 | ||
2211 | if (IS_QLA25XX(ha) && ql2xenabledif) { | ||
2212 | if (ha->fw_attributes & BIT_4) { | ||
2213 | base_vha->flags.difdix_supported = 1; | ||
2214 | DEBUG18(qla_printk(KERN_INFO, ha, | ||
2215 | "Registering for DIF/DIX type 1 and 3" | ||
2216 | " protection.\n")); | ||
2217 | scsi_host_set_prot(host, | ||
2218 | SHOST_DIF_TYPE1_PROTECTION | ||
2219 | | SHOST_DIF_TYPE3_PROTECTION | ||
2220 | | SHOST_DIX_TYPE1_PROTECTION | ||
2221 | | SHOST_DIX_TYPE3_PROTECTION); | ||
2222 | scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC); | ||
2223 | } else | ||
2224 | base_vha->flags.difdix_supported = 0; | ||
2225 | } | ||
2226 | |||
2188 | ha->isp_ops->enable_intrs(ha); | 2227 | ha->isp_ops->enable_intrs(ha); |
2189 | 2228 | ||
2190 | ret = scsi_add_host(host, &pdev->dev); | 2229 | ret = scsi_add_host(host, &pdev->dev); |
@@ -2546,7 +2585,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2546 | if (!ha->s_dma_pool) | 2585 | if (!ha->s_dma_pool) |
2547 | goto fail_free_nvram; | 2586 | goto fail_free_nvram; |
2548 | 2587 | ||
2549 | if (IS_QLA82XX(ha)) { | 2588 | if (IS_QLA82XX(ha) || ql2xenabledif) { |
2550 | ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, | 2589 | ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, |
2551 | DSD_LIST_DMA_POOL_SIZE, 8, 0); | 2590 | DSD_LIST_DMA_POOL_SIZE, 8, 0); |
2552 | if (!ha->dl_dma_pool) { | 2591 | if (!ha->dl_dma_pool) { |
@@ -2678,12 +2717,12 @@ fail_free_ms_iocb:
2678 | ha->ms_iocb = NULL; | 2717 | ha->ms_iocb = NULL; |
2679 | ha->ms_iocb_dma = 0; | 2718 | ha->ms_iocb_dma = 0; |
2680 | fail_dma_pool: | 2719 | fail_dma_pool: |
2681 | if (IS_QLA82XX(ha)) { | 2720 | if (IS_QLA82XX(ha) || ql2xenabledif) { |
2682 | dma_pool_destroy(ha->fcp_cmnd_dma_pool); | 2721 | dma_pool_destroy(ha->fcp_cmnd_dma_pool); |
2683 | ha->fcp_cmnd_dma_pool = NULL; | 2722 | ha->fcp_cmnd_dma_pool = NULL; |
2684 | } | 2723 | } |
2685 | fail_dl_dma_pool: | 2724 | fail_dl_dma_pool: |
2686 | if (IS_QLA82XX(ha)) { | 2725 | if (IS_QLA82XX(ha) || ql2xenabledif) { |
2687 | dma_pool_destroy(ha->dl_dma_pool); | 2726 | dma_pool_destroy(ha->dl_dma_pool); |
2688 | ha->dl_dma_pool = NULL; | 2727 | ha->dl_dma_pool = NULL; |
2689 | } | 2728 | } |
@@ -3346,11 +3385,31 @@ static void
3346 | qla2x00_sp_free_dma(srb_t *sp) | 3385 | qla2x00_sp_free_dma(srb_t *sp) |
3347 | { | 3386 | { |
3348 | struct scsi_cmnd *cmd = sp->cmd; | 3387 | struct scsi_cmnd *cmd = sp->cmd; |
3388 | struct qla_hw_data *ha = sp->fcport->vha->hw; | ||
3349 | 3389 | ||
3350 | if (sp->flags & SRB_DMA_VALID) { | 3390 | if (sp->flags & SRB_DMA_VALID) { |
3351 | scsi_dma_unmap(cmd); | 3391 | scsi_dma_unmap(cmd); |
3352 | sp->flags &= ~SRB_DMA_VALID; | 3392 | sp->flags &= ~SRB_DMA_VALID; |
3353 | } | 3393 | } |
3394 | |||
3395 | if (sp->flags & SRB_CRC_PROT_DMA_VALID) { | ||
3396 | dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), | ||
3397 | scsi_prot_sg_count(cmd), cmd->sc_data_direction); | ||
3398 | sp->flags &= ~SRB_CRC_PROT_DMA_VALID; | ||
3399 | } | ||
3400 | |||
3401 | if (sp->flags & SRB_CRC_CTX_DSD_VALID) { | ||
3402 | /* The list is guaranteed to have elements here */ | ||
3403 | qla2x00_clean_dsd_pool(ha, sp); | ||
3404 | sp->flags &= ~SRB_CRC_CTX_DSD_VALID; | ||
3405 | } | ||
3406 | |||
3407 | if (sp->flags & SRB_CRC_CTX_DMA_VALID) { | ||
3408 | dma_pool_free(ha->dl_dma_pool, sp->ctx, | ||
3409 | ((struct crc_context *)sp->ctx)->crc_ctx_dma); | ||
3410 | sp->flags &= ~SRB_CRC_CTX_DMA_VALID; | ||
3411 | } | ||
3412 | |||
3354 | CMD_SP(cmd) = NULL; | 3413 | CMD_SP(cmd) = NULL; |
3355 | } | 3414 | } |
3356 | 3415 | ||
@@ -3464,7 +3523,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3464 | sp = req->outstanding_cmds[index]; | 3523 | sp = req->outstanding_cmds[index]; |
3465 | if (!sp) | 3524 | if (!sp) |
3466 | continue; | 3525 | continue; |
3467 | if (sp->ctx) | 3526 | if (sp->ctx && !IS_PROT_IO(sp)) |
3468 | continue; | 3527 | continue; |
3469 | sfcp = sp->fcport; | 3528 | sfcp = sp->fcport; |
3470 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) | 3529 | if (!(sfcp->flags & FCF_FCP2_DEVICE)) |