-rw-r--r--   drivers/scsi/qla2xxx/qla_attr.c      30
-rw-r--r--   drivers/scsi/qla2xxx/qla_dbg.c       67
-rw-r--r--   drivers/scsi/qla2xxx/qla_dbg.h       15
-rw-r--r--   drivers/scsi/qla2xxx/qla_def.h       64
-rw-r--r--   drivers/scsi/qla2xxx/qla_fw.h        14
-rw-r--r--   drivers/scsi/qla2xxx/qla_gbl.h       41
-rw-r--r--   drivers/scsi/qla2xxx/qla_gs.c         6
-rw-r--r--   drivers/scsi/qla2xxx/qla_init.c     192
-rw-r--r--   drivers/scsi/qla2xxx/qla_inline.h    26
-rw-r--r--   drivers/scsi/qla2xxx/qla_iocb.c     158
-rw-r--r--   drivers/scsi/qla2xxx/qla_isr.c      335
-rw-r--r--   drivers/scsi/qla2xxx/qla_mbx.c      147
-rw-r--r--   drivers/scsi/qla2xxx/qla_mid.c      349
-rw-r--r--   drivers/scsi/qla2xxx/qla_os.c       423
-rw-r--r--   drivers/scsi/qla2xxx/qla_sup.c       33
-rw-r--r--   drivers/scsi/qla2xxx/qla_version.h    4
16 files changed, 1443 insertions(+), 461 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b22384229378..cd53627cc761 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1143,8 +1143,11 @@ static int | |||
1143 | qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | 1143 | qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) |
1144 | { | 1144 | { |
1145 | int ret = 0; | 1145 | int ret = 0; |
1146 | int cnt = 0; | ||
1147 | uint8_t qos = QLA_DEFAULT_QUE_QOS; | ||
1146 | scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); | 1148 | scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); |
1147 | scsi_qla_host_t *vha = NULL; | 1149 | scsi_qla_host_t *vha = NULL; |
1150 | struct qla_hw_data *ha = base_vha->hw; | ||
1148 | 1151 | ||
1149 | ret = qla24xx_vport_create_req_sanity_check(fc_vport); | 1152 | ret = qla24xx_vport_create_req_sanity_check(fc_vport); |
1150 | if (ret) { | 1153 | if (ret) { |
@@ -1200,6 +1203,22 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | |||
1200 | 1203 | ||
1201 | qla24xx_vport_disable(fc_vport, disable); | 1204 | qla24xx_vport_disable(fc_vport, disable); |
1202 | 1205 | ||
1206 | /* Create a queue pair for the vport */ | ||
1207 | if (ha->mqenable) { | ||
1208 | if (ha->npiv_info) { | ||
1209 | for (; cnt < ha->nvram_npiv_size; cnt++) { | ||
1210 | if (ha->npiv_info[cnt].port_name == | ||
1211 | vha->port_name && | ||
1212 | ha->npiv_info[cnt].node_name == | ||
1213 | vha->node_name) { | ||
1214 | qos = ha->npiv_info[cnt].q_qos; | ||
1215 | break; | ||
1216 | } | ||
1217 | } | ||
1218 | } | ||
1219 | qla25xx_create_queues(vha, qos); | ||
1220 | } | ||
1221 | |||
1203 | return 0; | 1222 | return 0; |
1204 | vport_create_failed_2: | 1223 | vport_create_failed_2: |
1205 | qla24xx_disable_vp(vha); | 1224 | qla24xx_disable_vp(vha); |
@@ -1213,11 +1232,20 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1213 | { | 1232 | { |
1214 | scsi_qla_host_t *vha = fc_vport->dd_data; | 1233 | scsi_qla_host_t *vha = fc_vport->dd_data; |
1215 | fc_port_t *fcport, *tfcport; | 1234 | fc_port_t *fcport, *tfcport; |
1235 | struct qla_hw_data *ha = vha->hw; | ||
1236 | uint16_t id = vha->vp_idx; | ||
1216 | 1237 | ||
1217 | while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || | 1238 | while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || |
1218 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) | 1239 | test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) |
1219 | msleep(1000); | 1240 | msleep(1000); |
1220 | 1241 | ||
1242 | if (ha->mqenable) { | ||
1243 | if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) | ||
1244 | qla_printk(KERN_WARNING, ha, | ||
1245 | "Queue delete failed.\n"); | ||
1246 | vha->req_ques[0] = ha->req_q_map[0]->id; | ||
1247 | } | ||
1248 | |||
1221 | qla24xx_disable_vp(vha); | 1249 | qla24xx_disable_vp(vha); |
1222 | 1250 | ||
1223 | fc_remove_host(vha->host); | 1251 | fc_remove_host(vha->host); |
@@ -1240,7 +1268,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1240 | } | 1268 | } |
1241 | 1269 | ||
1242 | scsi_host_put(vha->host); | 1270 | scsi_host_put(vha->host); |
1243 | 1271 | qla_printk(KERN_INFO, ha, "vport %d deleted\n", id); | |
1244 | return 0; | 1272 | return 0; |
1245 | } | 1273 | } |
1246 | 1274 | ||
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f15f903aec55..1cf77772623b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -23,11 +23,10 @@ qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) | |||
23 | } | 23 | } |
24 | 24 | ||
25 | static inline void * | 25 | static inline void * |
26 | qla2xxx_copy_queues(scsi_qla_host_t *vha, void *ptr) | 26 | qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) |
27 | { | 27 | { |
28 | struct req_que *req = vha->hw->req; | 28 | struct req_que *req = ha->req_q_map[0]; |
29 | struct rsp_que *rsp = vha->hw->rsp; | 29 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
30 | |||
31 | /* Request queue. */ | 30 | /* Request queue. */ |
32 | memcpy(ptr, req->ring, req->length * | 31 | memcpy(ptr, req->ring, req->length * |
33 | sizeof(request_t)); | 32 | sizeof(request_t)); |
@@ -327,6 +326,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
327 | unsigned long flags; | 326 | unsigned long flags; |
328 | struct qla2300_fw_dump *fw; | 327 | struct qla2300_fw_dump *fw; |
329 | void *nxt; | 328 | void *nxt; |
329 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
330 | 330 | ||
331 | flags = 0; | 331 | flags = 0; |
332 | 332 | ||
@@ -461,7 +461,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
461 | ha->fw_memory_size - 0x11000 + 1, &nxt); | 461 | ha->fw_memory_size - 0x11000 + 1, &nxt); |
462 | 462 | ||
463 | if (rval == QLA_SUCCESS) | 463 | if (rval == QLA_SUCCESS) |
464 | qla2xxx_copy_queues(vha, nxt); | 464 | qla2xxx_copy_queues(ha, nxt); |
465 | 465 | ||
466 | if (rval != QLA_SUCCESS) { | 466 | if (rval != QLA_SUCCESS) { |
467 | qla_printk(KERN_WARNING, ha, | 467 | qla_printk(KERN_WARNING, ha, |
@@ -471,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
471 | } else { | 471 | } else { |
472 | qla_printk(KERN_INFO, ha, | 472 | qla_printk(KERN_INFO, ha, |
473 | "Firmware dump saved to temp buffer (%ld/%p).\n", | 473 | "Firmware dump saved to temp buffer (%ld/%p).\n", |
474 | vha->host_no, ha->fw_dump); | 474 | base_vha->host_no, ha->fw_dump); |
475 | ha->fw_dumped = 1; | 475 | ha->fw_dumped = 1; |
476 | } | 476 | } |
477 | 477 | ||
@@ -497,6 +497,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
497 | uint16_t __iomem *dmp_reg; | 497 | uint16_t __iomem *dmp_reg; |
498 | unsigned long flags; | 498 | unsigned long flags; |
499 | struct qla2100_fw_dump *fw; | 499 | struct qla2100_fw_dump *fw; |
500 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
500 | 501 | ||
501 | risc_address = 0; | 502 | risc_address = 0; |
502 | mb0 = mb2 = 0; | 503 | mb0 = mb2 = 0; |
@@ -667,7 +668,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
667 | } | 668 | } |
668 | 669 | ||
669 | if (rval == QLA_SUCCESS) | 670 | if (rval == QLA_SUCCESS) |
670 | qla2xxx_copy_queues(vha, &fw->risc_ram[cnt]); | 671 | qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); |
671 | 672 | ||
672 | if (rval != QLA_SUCCESS) { | 673 | if (rval != QLA_SUCCESS) { |
673 | qla_printk(KERN_WARNING, ha, | 674 | qla_printk(KERN_WARNING, ha, |
@@ -677,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
677 | } else { | 678 | } else { |
678 | qla_printk(KERN_INFO, ha, | 679 | qla_printk(KERN_INFO, ha, |
679 | "Firmware dump saved to temp buffer (%ld/%p).\n", | 680 | "Firmware dump saved to temp buffer (%ld/%p).\n", |
680 | vha->host_no, ha->fw_dump); | 681 | base_vha->host_no, ha->fw_dump); |
681 | ha->fw_dumped = 1; | 682 | ha->fw_dumped = 1; |
682 | } | 683 | } |
683 | 684 | ||
@@ -701,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
701 | struct qla24xx_fw_dump *fw; | 702 | struct qla24xx_fw_dump *fw; |
702 | uint32_t ext_mem_cnt; | 703 | uint32_t ext_mem_cnt; |
703 | void *nxt; | 704 | void *nxt; |
705 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
704 | 706 | ||
705 | risc_address = ext_mem_cnt = 0; | 707 | risc_address = ext_mem_cnt = 0; |
706 | flags = 0; | 708 | flags = 0; |
@@ -910,7 +912,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
910 | if (rval != QLA_SUCCESS) | 912 | if (rval != QLA_SUCCESS) |
911 | goto qla24xx_fw_dump_failed_0; | 913 | goto qla24xx_fw_dump_failed_0; |
912 | 914 | ||
913 | nxt = qla2xxx_copy_queues(vha, nxt); | 915 | nxt = qla2xxx_copy_queues(ha, nxt); |
914 | if (ha->eft) | 916 | if (ha->eft) |
915 | memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); | 917 | memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); |
916 | 918 | ||
@@ -923,7 +925,7 @@ qla24xx_fw_dump_failed_0: | |||
923 | } else { | 925 | } else { |
924 | qla_printk(KERN_INFO, ha, | 926 | qla_printk(KERN_INFO, ha, |
925 | "Firmware dump saved to temp buffer (%ld/%p).\n", | 927 | "Firmware dump saved to temp buffer (%ld/%p).\n", |
926 | vha->host_no, ha->fw_dump); | 928 | base_vha->host_no, ha->fw_dump); |
927 | ha->fw_dumped = 1; | 929 | ha->fw_dumped = 1; |
928 | } | 930 | } |
929 | 931 | ||
@@ -940,6 +942,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
940 | uint32_t risc_address; | 942 | uint32_t risc_address; |
941 | struct qla_hw_data *ha = vha->hw; | 943 | struct qla_hw_data *ha = vha->hw; |
942 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 944 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
945 | struct device_reg_25xxmq __iomem *reg25; | ||
943 | uint32_t __iomem *dmp_reg; | 946 | uint32_t __iomem *dmp_reg; |
944 | uint32_t *iter_reg; | 947 | uint32_t *iter_reg; |
945 | uint16_t __iomem *mbx_reg; | 948 | uint16_t __iomem *mbx_reg; |
@@ -948,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
948 | uint32_t ext_mem_cnt; | 951 | uint32_t ext_mem_cnt; |
949 | void *nxt; | 952 | void *nxt; |
950 | struct qla2xxx_fce_chain *fcec; | 953 | struct qla2xxx_fce_chain *fcec; |
954 | struct qla2xxx_mq_chain *mq = NULL; | ||
955 | uint32_t qreg_size; | ||
956 | uint8_t req_cnt, rsp_cnt, que_cnt; | ||
957 | uint32_t que_idx; | ||
958 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
951 | 959 | ||
952 | risc_address = ext_mem_cnt = 0; | 960 | risc_address = ext_mem_cnt = 0; |
953 | flags = 0; | 961 | flags = 0; |
@@ -992,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
992 | fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); | 1000 | fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); |
993 | fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); | 1001 | fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); |
994 | fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); | 1002 | fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); |
1003 | |||
1004 | /* Multi queue registers */ | ||
1005 | if (ha->mqenable) { | ||
1006 | qreg_size = sizeof(struct qla2xxx_mq_chain); | ||
1007 | mq = kzalloc(qreg_size, GFP_KERNEL); | ||
1008 | if (!mq) | ||
1009 | goto qla25xx_fw_dump_failed_0; | ||
1010 | req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); | ||
1011 | rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); | ||
1012 | que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt; | ||
1013 | mq->count = htonl(que_cnt); | ||
1014 | mq->chain_size = htonl(qreg_size); | ||
1015 | mq->type = __constant_htonl(DUMP_CHAIN_MQ); | ||
1016 | for (cnt = 0; cnt < que_cnt; cnt++) { | ||
1017 | reg25 = (struct device_reg_25xxmq *) ((void *) | ||
1018 | ha->mqiobase + cnt * QLA_QUE_PAGE); | ||
1019 | que_idx = cnt * 4; | ||
1020 | mq->qregs[que_idx] = htonl(reg25->req_q_in); | ||
1021 | mq->qregs[que_idx+1] = htonl(reg25->req_q_out); | ||
1022 | mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in); | ||
1023 | mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out); | ||
1024 | } | ||
1025 | } | ||
995 | WRT_REG_DWORD(®->iobase_window, 0x00); | 1026 | WRT_REG_DWORD(®->iobase_window, 0x00); |
996 | RD_REG_DWORD(®->iobase_window); | 1027 | RD_REG_DWORD(®->iobase_window); |
997 | 1028 | ||
@@ -1219,7 +1250,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1219 | goto qla25xx_fw_dump_failed_0; | 1250 | goto qla25xx_fw_dump_failed_0; |
1220 | 1251 | ||
1221 | /* Fibre Channel Trace Buffer. */ | 1252 | /* Fibre Channel Trace Buffer. */ |
1222 | nxt = qla2xxx_copy_queues(vha, nxt); | 1253 | nxt = qla2xxx_copy_queues(ha, nxt); |
1223 | if (ha->eft) | 1254 | if (ha->eft) |
1224 | memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); | 1255 | memcpy(nxt, ha->eft, ntohl(ha->fw_dump->eft_size)); |
1225 | 1256 | ||
@@ -1229,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1229 | 1260 | ||
1230 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); | 1261 | ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); |
1231 | 1262 | ||
1232 | fcec = nxt + ntohl(ha->fw_dump->eft_size); | 1263 | if (ha->mqenable) { |
1264 | nxt = nxt + ntohl(ha->fw_dump->eft_size); | ||
1265 | memcpy(nxt, mq, qreg_size); | ||
1266 | kfree(mq); | ||
1267 | fcec = nxt + qreg_size; | ||
1268 | } else { | ||
1269 | fcec = nxt + ntohl(ha->fw_dump->eft_size); | ||
1270 | } | ||
1233 | fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST); | 1271 | fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST); |
1234 | fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + | 1272 | fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + |
1235 | fce_calc_size(ha->fce_bufs)); | 1273 | fce_calc_size(ha->fce_bufs)); |
@@ -1252,7 +1290,7 @@ qla25xx_fw_dump_failed_0: | |||
1252 | } else { | 1290 | } else { |
1253 | qla_printk(KERN_INFO, ha, | 1291 | qla_printk(KERN_INFO, ha, |
1254 | "Firmware dump saved to temp buffer (%ld/%p).\n", | 1292 | "Firmware dump saved to temp buffer (%ld/%p).\n", |
1255 | vha->host_no, ha->fw_dump); | 1293 | base_vha->host_no, ha->fw_dump); |
1256 | ha->fw_dumped = 1; | 1294 | ha->fw_dumped = 1; |
1257 | } | 1295 | } |
1258 | 1296 | ||
@@ -1260,7 +1298,6 @@ qla25xx_fw_dump_failed: | |||
1260 | if (!hardware_locked) | 1298 | if (!hardware_locked) |
1261 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1299 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
1262 | } | 1300 | } |
1263 | |||
1264 | /****************************************************************************/ | 1301 | /****************************************************************************/ |
1265 | /* Driver Debug Functions. */ | 1302 | /* Driver Debug Functions. */ |
1266 | /****************************************************************************/ | 1303 | /****************************************************************************/ |
@@ -1307,3 +1344,5 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size) | |||
1307 | if (cnt % 16) | 1344 | if (cnt % 16) |
1308 | printk("\n"); | 1345 | printk("\n"); |
1309 | } | 1346 | } |
1347 | |||
1348 | |||
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e9c0c097f5e..c1794a70a45f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -4,6 +4,9 @@ | |||
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | |||
8 | #include "qla_def.h" | ||
9 | |||
7 | /* | 10 | /* |
8 | * Driver debug definitions. | 11 | * Driver debug definitions. |
9 | */ | 12 | */ |
@@ -23,6 +26,7 @@ | |||
23 | /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ | 26 | /* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ |
24 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ | 27 | /* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ |
25 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ | 28 | /* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ |
29 | /* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */ | ||
26 | 30 | ||
27 | /* | 31 | /* |
28 | * Macros use for debugging the driver. | 32 | * Macros use for debugging the driver. |
@@ -43,6 +47,7 @@ | |||
43 | #define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) | 47 | #define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) |
44 | #define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) | 48 | #define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) |
45 | #define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) | 49 | #define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) |
50 | #define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0) | ||
46 | 51 | ||
47 | #if defined(QL_DEBUG_LEVEL_3) | 52 | #if defined(QL_DEBUG_LEVEL_3) |
48 | #define DEBUG3(x) do {x;} while (0) | 53 | #define DEBUG3(x) do {x;} while (0) |
@@ -127,7 +132,6 @@ | |||
127 | #else | 132 | #else |
128 | #define DEBUG16(x) do {} while (0) | 133 | #define DEBUG16(x) do {} while (0) |
129 | #endif | 134 | #endif |
130 | |||
131 | /* | 135 | /* |
132 | * Firmware Dump structure definition | 136 | * Firmware Dump structure definition |
133 | */ | 137 | */ |
@@ -266,8 +270,17 @@ struct qla2xxx_fce_chain { | |||
266 | uint32_t eregs[8]; | 270 | uint32_t eregs[8]; |
267 | }; | 271 | }; |
268 | 272 | ||
273 | struct qla2xxx_mq_chain { | ||
274 | uint32_t type; | ||
275 | uint32_t chain_size; | ||
276 | |||
277 | uint32_t count; | ||
278 | uint32_t qregs[4 * QLA_MQ_SIZE]; | ||
279 | }; | ||
280 | |||
269 | #define DUMP_CHAIN_VARIANT 0x80000000 | 281 | #define DUMP_CHAIN_VARIANT 0x80000000 |
270 | #define DUMP_CHAIN_FCE 0x7FFFFAF0 | 282 | #define DUMP_CHAIN_FCE 0x7FFFFAF0 |
283 | #define DUMP_CHAIN_MQ 0x7FFFFAF1 | ||
271 | #define DUMP_CHAIN_LAST 0x80000000 | 284 | #define DUMP_CHAIN_LAST 0x80000000 |
272 | 285 | ||
273 | struct qla2xxx_fw_dump { | 286 | struct qla2xxx_fw_dump { |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index edead2802517..5ecf29283b6b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -369,9 +369,17 @@ struct device_reg_2xxx { | |||
369 | } u_end; | 369 | } u_end; |
370 | }; | 370 | }; |
371 | 371 | ||
372 | struct device_reg_25xxmq { | ||
373 | volatile uint32_t req_q_in; | ||
374 | volatile uint32_t req_q_out; | ||
375 | volatile uint32_t rsp_q_in; | ||
376 | volatile uint32_t rsp_q_out; | ||
377 | }; | ||
378 | |||
372 | typedef union { | 379 | typedef union { |
373 | struct device_reg_2xxx isp; | 380 | struct device_reg_2xxx isp; |
374 | struct device_reg_24xx isp24; | 381 | struct device_reg_24xx isp24; |
382 | struct device_reg_25xxmq isp25mq; | ||
375 | } device_reg_t; | 383 | } device_reg_t; |
376 | 384 | ||
377 | #define ISP_REQ_Q_IN(ha, reg) \ | 385 | #define ISP_REQ_Q_IN(ha, reg) \ |
@@ -2037,6 +2045,7 @@ typedef struct vport_params { | |||
2037 | #define VP_RET_CODE_NOT_FOUND 6 | 2045 | #define VP_RET_CODE_NOT_FOUND 6 |
2038 | 2046 | ||
2039 | struct qla_hw_data; | 2047 | struct qla_hw_data; |
2048 | struct req_que; | ||
2040 | 2049 | ||
2041 | /* | 2050 | /* |
2042 | * ISP operations | 2051 | * ISP operations |
@@ -2059,7 +2068,8 @@ struct isp_operations { | |||
2059 | void (*enable_intrs) (struct qla_hw_data *); | 2068 | void (*enable_intrs) (struct qla_hw_data *); |
2060 | void (*disable_intrs) (struct qla_hw_data *); | 2069 | void (*disable_intrs) (struct qla_hw_data *); |
2061 | 2070 | ||
2062 | int (*abort_command) (struct scsi_qla_host *, srb_t *); | 2071 | int (*abort_command) (struct scsi_qla_host *, srb_t *, |
2072 | struct req_que *); | ||
2063 | int (*target_reset) (struct fc_port *, unsigned int); | 2073 | int (*target_reset) (struct fc_port *, unsigned int); |
2064 | int (*lun_reset) (struct fc_port *, unsigned int); | 2074 | int (*lun_reset) (struct fc_port *, unsigned int); |
2065 | int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, | 2075 | int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, |
@@ -2102,16 +2112,18 @@ struct isp_operations { | |||
2102 | #define QLA_MSIX_DEFAULT 0x00 | 2112 | #define QLA_MSIX_DEFAULT 0x00 |
2103 | #define QLA_MSIX_RSP_Q 0x01 | 2113 | #define QLA_MSIX_RSP_Q 0x01 |
2104 | 2114 | ||
2105 | #define QLA_MSIX_ENTRIES 2 | ||
2106 | #define QLA_MIDX_DEFAULT 0 | 2115 | #define QLA_MIDX_DEFAULT 0 |
2107 | #define QLA_MIDX_RSP_Q 1 | 2116 | #define QLA_MIDX_RSP_Q 1 |
2117 | #define QLA_PCI_MSIX_CONTROL 0xa2 | ||
2108 | 2118 | ||
2109 | struct scsi_qla_host; | 2119 | struct scsi_qla_host; |
2120 | struct rsp_que; | ||
2110 | 2121 | ||
2111 | struct qla_msix_entry { | 2122 | struct qla_msix_entry { |
2112 | int have_irq; | 2123 | int have_irq; |
2113 | uint32_t msix_vector; | 2124 | uint32_t vector; |
2114 | uint16_t msix_entry; | 2125 | uint16_t entry; |
2126 | struct rsp_que *rsp; | ||
2115 | }; | 2127 | }; |
2116 | 2128 | ||
2117 | #define WATCH_INTERVAL 1 /* number of seconds */ | 2129 | #define WATCH_INTERVAL 1 /* number of seconds */ |
@@ -2162,6 +2174,23 @@ struct qla_statistics { | |||
2162 | uint64_t output_bytes; | 2174 | uint64_t output_bytes; |
2163 | }; | 2175 | }; |
2164 | 2176 | ||
2177 | /* Multi queue support */ | ||
2178 | #define MBC_INITIALIZE_MULTIQ 0x1f | ||
2179 | #define QLA_QUE_PAGE 0X1000 | ||
2180 | #define QLA_MQ_SIZE 32 | ||
2181 | #define QLA_MAX_HOST_QUES 16 | ||
2182 | #define QLA_MAX_QUEUES 256 | ||
2183 | #define ISP_QUE_REG(ha, id) \ | ||
2184 | ((ha->mqenable) ? \ | ||
2185 | ((void *)(ha->mqiobase) +\ | ||
2186 | (QLA_QUE_PAGE * id)) :\ | ||
2187 | ((void *)(ha->iobase))) | ||
2188 | #define QLA_REQ_QUE_ID(tag) \ | ||
2189 | ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) | ||
2190 | #define QLA_DEFAULT_QUE_QOS 5 | ||
2191 | #define QLA_PRECONFIG_VPORTS 32 | ||
2192 | #define QLA_MAX_VPORTS_QLA24XX 128 | ||
2193 | #define QLA_MAX_VPORTS_QLA25XX 256 | ||
2165 | /* Response queue data structure */ | 2194 | /* Response queue data structure */ |
2166 | struct rsp_que { | 2195 | struct rsp_que { |
2167 | dma_addr_t dma; | 2196 | dma_addr_t dma; |
@@ -2171,9 +2200,12 @@ struct rsp_que { | |||
2171 | uint16_t out_ptr; | 2200 | uint16_t out_ptr; |
2172 | uint16_t length; | 2201 | uint16_t length; |
2173 | uint16_t options; | 2202 | uint16_t options; |
2174 | uint16_t msix_vector; | ||
2175 | uint16_t rid; | 2203 | uint16_t rid; |
2204 | uint16_t id; | ||
2205 | uint16_t vp_idx; | ||
2176 | struct qla_hw_data *hw; | 2206 | struct qla_hw_data *hw; |
2207 | struct qla_msix_entry *msix; | ||
2208 | struct req_que *req; | ||
2177 | }; | 2209 | }; |
2178 | 2210 | ||
2179 | /* Request queue data structure */ | 2211 | /* Request queue data structure */ |
@@ -2187,10 +2219,10 @@ struct req_que { | |||
2187 | uint16_t length; | 2219 | uint16_t length; |
2188 | uint16_t options; | 2220 | uint16_t options; |
2189 | uint16_t rid; | 2221 | uint16_t rid; |
2222 | uint16_t id; | ||
2190 | uint16_t qos; | 2223 | uint16_t qos; |
2191 | uint16_t vp_idx; | 2224 | uint16_t vp_idx; |
2192 | struct rsp_que *asso_que; | 2225 | struct rsp_que *rsp; |
2193 | /* Outstandings ISP commands. */ | ||
2194 | srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; | 2226 | srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; |
2195 | uint32_t current_outstanding_cmd; | 2227 | uint32_t current_outstanding_cmd; |
2196 | int max_q_depth; | 2228 | int max_q_depth; |
@@ -2240,8 +2272,17 @@ struct qla_hw_data { | |||
2240 | resource_size_t pio_address; | 2272 | resource_size_t pio_address; |
2241 | 2273 | ||
2242 | #define MIN_IOBASE_LEN 0x100 | 2274 | #define MIN_IOBASE_LEN 0x100 |
2243 | struct req_que *req; | 2275 | /* Multi queue data structs */ |
2244 | struct rsp_que *rsp; | 2276 | device_reg_t *mqiobase; |
2277 | uint16_t msix_count; | ||
2278 | uint8_t mqenable; | ||
2279 | struct req_que **req_q_map; | ||
2280 | struct rsp_que **rsp_q_map; | ||
2281 | unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; | ||
2282 | unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; | ||
2283 | uint16_t max_queues; | ||
2284 | struct qla_npiv_entry *npiv_info; | ||
2285 | uint16_t nvram_npiv_size; | ||
2245 | 2286 | ||
2246 | uint16_t switch_cap; | 2287 | uint16_t switch_cap; |
2247 | #define FLOGI_SEQ_DEL BIT_8 | 2288 | #define FLOGI_SEQ_DEL BIT_8 |
@@ -2502,7 +2543,7 @@ struct qla_hw_data { | |||
2502 | uint16_t zio_timer; | 2543 | uint16_t zio_timer; |
2503 | struct fc_host_statistics fc_host_stat; | 2544 | struct fc_host_statistics fc_host_stat; |
2504 | 2545 | ||
2505 | struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES]; | 2546 | struct qla_msix_entry *msix_entries; |
2506 | 2547 | ||
2507 | struct list_head vp_list; /* list of VP */ | 2548 | struct list_head vp_list; /* list of VP */ |
2508 | unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / | 2549 | unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / |
@@ -2524,7 +2565,6 @@ typedef struct scsi_qla_host { | |||
2524 | struct list_head list; | 2565 | struct list_head list; |
2525 | struct list_head vp_fcports; /* list of fcports */ | 2566 | struct list_head vp_fcports; /* list of fcports */ |
2526 | struct list_head work_list; | 2567 | struct list_head work_list; |
2527 | |||
2528 | /* Commonly used flags and state information. */ | 2568 | /* Commonly used flags and state information. */ |
2529 | struct Scsi_Host *host; | 2569 | struct Scsi_Host *host; |
2530 | unsigned long host_no; | 2570 | unsigned long host_no; |
@@ -2640,9 +2680,9 @@ typedef struct scsi_qla_host { | |||
2640 | #define VP_ERR_FAB_LOGOUT 4 | 2680 | #define VP_ERR_FAB_LOGOUT 4 |
2641 | #define VP_ERR_ADAP_NORESOURCES 5 | 2681 | #define VP_ERR_ADAP_NORESOURCES 5 |
2642 | struct qla_hw_data *hw; | 2682 | struct qla_hw_data *hw; |
2683 | int req_ques[QLA_MAX_HOST_QUES]; | ||
2643 | } scsi_qla_host_t; | 2684 | } scsi_qla_host_t; |
2644 | 2685 | ||
2645 | |||
2646 | /* | 2686 | /* |
2647 | * Macros to help code, maintain, etc. | 2687 | * Macros to help code, maintain, etc. |
2648 | */ | 2688 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d1d14202575a..ee1f1e794c2d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -299,7 +299,8 @@ struct init_cb_24xx { | |||
299 | uint32_t response_q_address[2]; | 299 | uint32_t response_q_address[2]; |
300 | uint32_t prio_request_q_address[2]; | 300 | uint32_t prio_request_q_address[2]; |
301 | 301 | ||
302 | uint8_t reserved_2[8]; | 302 | uint16_t msix; |
303 | uint8_t reserved_2[6]; | ||
303 | 304 | ||
304 | uint16_t atio_q_inpointer; | 305 | uint16_t atio_q_inpointer; |
305 | uint16_t atio_q_length; | 306 | uint16_t atio_q_length; |
@@ -372,8 +373,9 @@ struct init_cb_24xx { | |||
372 | * BIT 17-31 = Reserved | 373 | * BIT 17-31 = Reserved |
373 | */ | 374 | */ |
374 | uint32_t firmware_options_3; | 375 | uint32_t firmware_options_3; |
375 | 376 | uint16_t qos; | |
376 | uint8_t reserved_3[24]; | 377 | uint16_t rid; |
378 | uint8_t reserved_3[20]; | ||
377 | }; | 379 | }; |
378 | 380 | ||
379 | /* | 381 | /* |
@@ -754,7 +756,8 @@ struct abort_entry_24xx { | |||
754 | 756 | ||
755 | uint32_t handle_to_abort; /* System handle to abort. */ | 757 | uint32_t handle_to_abort; /* System handle to abort. */ |
756 | 758 | ||
757 | uint8_t reserved_1[32]; | 759 | uint16_t req_que_no; |
760 | uint8_t reserved_1[30]; | ||
758 | 761 | ||
759 | uint8_t port_id[3]; /* PortID of destination port. */ | 762 | uint8_t port_id[3]; /* PortID of destination port. */ |
760 | uint8_t vp_index; | 763 | uint8_t vp_index; |
@@ -1258,7 +1261,8 @@ struct qla_npiv_header { | |||
1258 | struct qla_npiv_entry { | 1261 | struct qla_npiv_entry { |
1259 | uint16_t flags; | 1262 | uint16_t flags; |
1260 | uint16_t vf_id; | 1263 | uint16_t vf_id; |
1261 | uint16_t qos; | 1264 | uint8_t q_qos; |
1265 | uint8_t f_qos; | ||
1262 | uint16_t unused1; | 1266 | uint16_t unused1; |
1263 | uint8_t port_name[WWN_SIZE]; | 1267 | uint8_t port_name[WWN_SIZE]; |
1264 | uint8_t node_name[WWN_SIZE]; | 1268 | uint8_t node_name[WWN_SIZE]; |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c0cc686d6cc3..d9712b543493 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -63,6 +63,7 @@ extern int ql2xallocfwdump; | |||
63 | extern int ql2xextended_error_logging; | 63 | extern int ql2xextended_error_logging; |
64 | extern int ql2xqfullrampup; | 64 | extern int ql2xqfullrampup; |
65 | extern int ql2xiidmaenable; | 65 | extern int ql2xiidmaenable; |
66 | extern int ql2xmaxqueues; | ||
66 | 67 | ||
67 | extern int qla2x00_loop_reset(scsi_qla_host_t *); | 68 | extern int qla2x00_loop_reset(scsi_qla_host_t *); |
68 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); | 69 | extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); |
@@ -97,7 +98,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); | |||
97 | extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); | 98 | extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); |
98 | extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); | 99 | extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); |
99 | 100 | ||
100 | extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); | 101 | extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *); |
101 | 102 | ||
102 | extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); | 103 | extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); |
103 | 104 | ||
@@ -109,8 +110,9 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); | |||
109 | extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); | 110 | extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); |
110 | 111 | ||
111 | extern void qla2xxx_wake_dpc(struct scsi_qla_host *); | 112 | extern void qla2xxx_wake_dpc(struct scsi_qla_host *); |
112 | extern void qla2x00_alert_all_vps(struct qla_hw_data *, uint16_t *); | 113 | extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *); |
113 | extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *); | 114 | extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *, |
115 | uint16_t *); | ||
114 | extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); | 116 | extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); |
115 | 117 | ||
116 | /* | 118 | /* |
@@ -122,8 +124,10 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); | |||
122 | extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); | 124 | extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); |
123 | extern int qla2x00_start_scsi(srb_t *sp); | 125 | extern int qla2x00_start_scsi(srb_t *sp); |
124 | extern int qla24xx_start_scsi(srb_t *sp); | 126 | extern int qla24xx_start_scsi(srb_t *sp); |
125 | int qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); | 127 | int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, |
126 | int __qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); | 128 | uint16_t, uint16_t, uint8_t); |
129 | int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, | ||
130 | uint16_t, uint16_t, uint8_t); | ||
127 | 131 | ||
128 | /* | 132 | /* |
129 | * Global Function Prototypes in qla_mbx.c source file. | 133 | * Global Function Prototypes in qla_mbx.c source file. |
@@ -157,7 +161,7 @@ extern int | |||
157 | qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); | 161 | qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); |
158 | 162 | ||
159 | extern int | 163 | extern int |
160 | qla2x00_abort_command(scsi_qla_host_t *, srb_t *); | 164 | qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); |
161 | 165 | ||
162 | extern int | 166 | extern int |
163 | qla2x00_abort_target(struct fc_port *, unsigned int); | 167 | qla2x00_abort_target(struct fc_port *, unsigned int); |
@@ -228,7 +232,7 @@ extern int | |||
228 | qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, | 232 | qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, |
229 | dma_addr_t); | 233 | dma_addr_t); |
230 | 234 | ||
231 | extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); | 235 | extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); |
232 | extern int qla24xx_abort_target(struct fc_port *, unsigned int); | 236 | extern int qla24xx_abort_target(struct fc_port *, unsigned int); |
233 | extern int qla24xx_lun_reset(struct fc_port *, unsigned int); | 237 | extern int qla24xx_lun_reset(struct fc_port *, unsigned int); |
234 | 238 | ||
@@ -267,10 +271,10 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *); | |||
267 | extern irqreturn_t qla2100_intr_handler(int, void *); | 271 | extern irqreturn_t qla2100_intr_handler(int, void *); |
268 | extern irqreturn_t qla2300_intr_handler(int, void *); | 272 | extern irqreturn_t qla2300_intr_handler(int, void *); |
269 | extern irqreturn_t qla24xx_intr_handler(int, void *); | 273 | extern irqreturn_t qla24xx_intr_handler(int, void *); |
270 | extern void qla2x00_process_response_queue(struct scsi_qla_host *); | 274 | extern void qla2x00_process_response_queue(struct rsp_que *); |
271 | extern void qla24xx_process_response_queue(struct scsi_qla_host *); | 275 | extern void qla24xx_process_response_queue(struct rsp_que *); |
272 | 276 | ||
273 | extern int qla2x00_request_irqs(struct qla_hw_data *); | 277 | extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); |
274 | extern void qla2x00_free_irqs(scsi_qla_host_t *); | 278 | extern void qla2x00_free_irqs(scsi_qla_host_t *); |
275 | 279 | ||
276 | /* | 280 | /* |
@@ -370,4 +374,21 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); | |||
370 | */ | 374 | */ |
371 | extern int qla2x00_dfs_setup(scsi_qla_host_t *); | 375 | extern int qla2x00_dfs_setup(scsi_qla_host_t *); |
372 | extern int qla2x00_dfs_remove(scsi_qla_host_t *); | 376 | extern int qla2x00_dfs_remove(scsi_qla_host_t *); |
377 | |||
378 | /* Global function prototypes for multi-q */ | ||
379 | extern int qla25xx_request_irq(struct rsp_que *); | ||
380 | extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *, | ||
381 | uint8_t); | ||
382 | extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *, | ||
383 | uint8_t); | ||
384 | extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, | ||
385 | uint16_t, uint8_t, uint8_t); | ||
386 | extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, | ||
387 | uint16_t); | ||
388 | extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); | ||
389 | extern void qla2x00_init_response_q_entries(struct rsp_que *); | ||
390 | extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); | ||
391 | extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); | ||
392 | extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); | ||
393 | extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); | ||
373 | #endif /* _QLA_GBL_H */ | 394 | #endif /* _QLA_GBL_H */ |
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index db8de063e1cb..0a6f72973996 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1668,12 +1668,6 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha) | |||
1668 | { | 1668 | { |
1669 | int rval; | 1669 | int rval; |
1670 | 1670 | ||
1671 | if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) { | ||
1672 | DEBUG2(printk("scsi(%ld): FDMI unsupported on " | ||
1673 | "ISP2100/ISP2200.\n", vha->host_no)); | ||
1674 | return QLA_SUCCESS; | ||
1675 | } | ||
1676 | |||
1677 | rval = qla2x00_mgmt_svr_login(vha); | 1671 | rval = qla2x00_mgmt_svr_login(vha); |
1678 | if (rval) | 1672 | if (rval) |
1679 | return rval; | 1673 | return rval; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 7bee87f90f6d..b1495ec0bf35 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5,6 +5,7 @@ | |||
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla2xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #include "qla_def.h" | 7 | #include "qla_def.h" |
8 | #include "qla_gbl.h" | ||
8 | 9 | ||
9 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
10 | #include <linux/vmalloc.h> | 11 | #include <linux/vmalloc.h> |
@@ -21,7 +22,6 @@ | |||
21 | static int qla2x00_isp_firmware(scsi_qla_host_t *); | 22 | static int qla2x00_isp_firmware(scsi_qla_host_t *); |
22 | static void qla2x00_resize_request_q(scsi_qla_host_t *); | 23 | static void qla2x00_resize_request_q(scsi_qla_host_t *); |
23 | static int qla2x00_setup_chip(scsi_qla_host_t *); | 24 | static int qla2x00_setup_chip(scsi_qla_host_t *); |
24 | static void qla2x00_init_response_q_entries(scsi_qla_host_t *); | ||
25 | static int qla2x00_init_rings(scsi_qla_host_t *); | 25 | static int qla2x00_init_rings(scsi_qla_host_t *); |
26 | static int qla2x00_fw_ready(scsi_qla_host_t *); | 26 | static int qla2x00_fw_ready(scsi_qla_host_t *); |
27 | static int qla2x00_configure_hba(scsi_qla_host_t *); | 27 | static int qla2x00_configure_hba(scsi_qla_host_t *); |
@@ -39,6 +39,7 @@ static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *); | |||
39 | 39 | ||
40 | static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); | 40 | static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); |
41 | static int qla84xx_init_chip(scsi_qla_host_t *); | 41 | static int qla84xx_init_chip(scsi_qla_host_t *); |
42 | static int qla25xx_init_queues(struct qla_hw_data *); | ||
42 | 43 | ||
43 | /****************************************************************************/ | 44 | /****************************************************************************/ |
44 | /* QLogic ISP2x00 Hardware Support Functions. */ | 45 | /* QLogic ISP2x00 Hardware Support Functions. */ |
@@ -59,6 +60,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
59 | { | 60 | { |
60 | int rval; | 61 | int rval; |
61 | struct qla_hw_data *ha = vha->hw; | 62 | struct qla_hw_data *ha = vha->hw; |
63 | struct req_que *req = ha->req_q_map[0]; | ||
62 | /* Clear adapter flags. */ | 64 | /* Clear adapter flags. */ |
63 | vha->flags.online = 0; | 65 | vha->flags.online = 0; |
64 | vha->flags.reset_active = 0; | 66 | vha->flags.reset_active = 0; |
@@ -73,6 +75,9 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
73 | ha->beacon_blink_led = 0; | 75 | ha->beacon_blink_led = 0; |
74 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); | 76 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); |
75 | 77 | ||
78 | set_bit(0, ha->req_qid_map); | ||
79 | set_bit(0, ha->rsp_qid_map); | ||
80 | |||
76 | qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); | 81 | qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); |
77 | rval = ha->isp_ops->pci_config(vha); | 82 | rval = ha->isp_ops->pci_config(vha); |
78 | if (rval) { | 83 | if (rval) { |
@@ -90,7 +95,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
90 | return (rval); | 95 | return (rval); |
91 | } | 96 | } |
92 | 97 | ||
93 | ha->isp_ops->get_flash_version(vha, ha->req->ring); | 98 | ha->isp_ops->get_flash_version(vha, req->ring); |
94 | 99 | ||
95 | qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); | 100 | qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); |
96 | 101 | ||
@@ -603,6 +608,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) | |||
603 | uint16_t data; | 608 | uint16_t data; |
604 | uint32_t cnt; | 609 | uint32_t cnt; |
605 | uint16_t mb[5]; | 610 | uint16_t mb[5]; |
611 | struct req_que *req = ha->req_q_map[0]; | ||
606 | 612 | ||
607 | /* Assume a failed state */ | 613 | /* Assume a failed state */ |
608 | rval = QLA_FUNCTION_FAILED; | 614 | rval = QLA_FUNCTION_FAILED; |
@@ -671,11 +677,11 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) | |||
671 | ha->product_id[3] = mb[4]; | 677 | ha->product_id[3] = mb[4]; |
672 | 678 | ||
673 | /* Adjust fw RISC transfer size */ | 679 | /* Adjust fw RISC transfer size */ |
674 | if (ha->req->length > 1024) | 680 | if (req->length > 1024) |
675 | ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; | 681 | ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; |
676 | else | 682 | else |
677 | ha->fw_transfer_size = REQUEST_ENTRY_SIZE * | 683 | ha->fw_transfer_size = REQUEST_ENTRY_SIZE * |
678 | ha->req->length; | 684 | req->length; |
679 | 685 | ||
680 | if (IS_QLA2200(ha) && | 686 | if (IS_QLA2200(ha) && |
681 | RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { | 687 | RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { |
@@ -725,11 +731,12 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) | |||
725 | { | 731 | { |
726 | int rval; | 732 | int rval; |
727 | struct qla_hw_data *ha = vha->hw; | 733 | struct qla_hw_data *ha = vha->hw; |
734 | struct req_que *req = ha->req_q_map[0]; | ||
728 | 735 | ||
729 | /* Perform RISC reset. */ | 736 | /* Perform RISC reset. */ |
730 | qla24xx_reset_risc(vha); | 737 | qla24xx_reset_risc(vha); |
731 | 738 | ||
732 | ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->req->length; | 739 | ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; |
733 | 740 | ||
734 | rval = qla2x00_mbx_reg_test(vha); | 741 | rval = qla2x00_mbx_reg_test(vha); |
735 | if (rval) { | 742 | if (rval) { |
@@ -750,10 +757,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
750 | { | 757 | { |
751 | int rval; | 758 | int rval; |
752 | uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, | 759 | uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, |
753 | eft_size, fce_size; | 760 | eft_size, fce_size, mq_size; |
754 | dma_addr_t tc_dma; | 761 | dma_addr_t tc_dma; |
755 | void *tc; | 762 | void *tc; |
756 | struct qla_hw_data *ha = vha->hw; | 763 | struct qla_hw_data *ha = vha->hw; |
764 | struct req_que *req = ha->req_q_map[0]; | ||
765 | struct rsp_que *rsp = ha->rsp_q_map[0]; | ||
757 | 766 | ||
758 | if (ha->fw_dump) { | 767 | if (ha->fw_dump) { |
759 | qla_printk(KERN_WARNING, ha, | 768 | qla_printk(KERN_WARNING, ha, |
@@ -762,7 +771,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
762 | } | 771 | } |
763 | 772 | ||
764 | ha->fw_dumped = 0; | 773 | ha->fw_dumped = 0; |
765 | fixed_size = mem_size = eft_size = fce_size = 0; | 774 | fixed_size = mem_size = eft_size = fce_size = mq_size = 0; |
766 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) { | 775 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) { |
767 | fixed_size = sizeof(struct qla2100_fw_dump); | 776 | fixed_size = sizeof(struct qla2100_fw_dump); |
768 | } else if (IS_QLA23XX(ha)) { | 777 | } else if (IS_QLA23XX(ha)) { |
@@ -771,10 +780,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
771 | sizeof(uint16_t); | 780 | sizeof(uint16_t); |
772 | } else if (IS_FWI2_CAPABLE(ha)) { | 781 | } else if (IS_FWI2_CAPABLE(ha)) { |
773 | fixed_size = IS_QLA25XX(ha) ? | 782 | fixed_size = IS_QLA25XX(ha) ? |
774 | offsetof(struct qla25xx_fw_dump, ext_mem): | 783 | offsetof(struct qla25xx_fw_dump, ext_mem) : |
775 | offsetof(struct qla24xx_fw_dump, ext_mem); | 784 | offsetof(struct qla24xx_fw_dump, ext_mem); |
776 | mem_size = (ha->fw_memory_size - 0x100000 + 1) * | 785 | mem_size = (ha->fw_memory_size - 0x100000 + 1) * |
777 | sizeof(uint32_t); | 786 | sizeof(uint32_t); |
787 | if (ha->mqenable) | ||
788 | mq_size = sizeof(struct qla2xxx_mq_chain); | ||
778 | 789 | ||
779 | /* Allocate memory for Fibre Channel Event Buffer. */ | 790 | /* Allocate memory for Fibre Channel Event Buffer. */ |
780 | if (!IS_QLA25XX(ha)) | 791 | if (!IS_QLA25XX(ha)) |
@@ -785,7 +796,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
785 | if (!tc) { | 796 | if (!tc) { |
786 | qla_printk(KERN_WARNING, ha, "Unable to allocate " | 797 | qla_printk(KERN_WARNING, ha, "Unable to allocate " |
787 | "(%d KB) for FCE.\n", FCE_SIZE / 1024); | 798 | "(%d KB) for FCE.\n", FCE_SIZE / 1024); |
788 | goto try_eft; | 799 | goto cont_alloc; |
789 | } | 800 | } |
790 | 801 | ||
791 | memset(tc, 0, FCE_SIZE); | 802 | memset(tc, 0, FCE_SIZE); |
@@ -797,7 +808,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
797 | dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, | 808 | dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, |
798 | tc_dma); | 809 | tc_dma); |
799 | ha->flags.fce_enabled = 0; | 810 | ha->flags.fce_enabled = 0; |
800 | goto try_eft; | 811 | goto cont_alloc; |
801 | } | 812 | } |
802 | 813 | ||
803 | qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", | 814 | qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", |
@@ -835,12 +846,12 @@ try_eft: | |||
835 | ha->eft = tc; | 846 | ha->eft = tc; |
836 | } | 847 | } |
837 | cont_alloc: | 848 | cont_alloc: |
838 | req_q_size = ha->req->length * sizeof(request_t); | 849 | req_q_size = req->length * sizeof(request_t); |
839 | rsp_q_size = ha->rsp->length * sizeof(response_t); | 850 | rsp_q_size = rsp->length * sizeof(response_t); |
840 | 851 | ||
841 | dump_size = offsetof(struct qla2xxx_fw_dump, isp); | 852 | dump_size = offsetof(struct qla2xxx_fw_dump, isp); |
842 | dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + | 853 | dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + |
843 | eft_size + fce_size; | 854 | mq_size + eft_size + fce_size; |
844 | 855 | ||
845 | ha->fw_dump = vmalloc(dump_size); | 856 | ha->fw_dump = vmalloc(dump_size); |
846 | if (!ha->fw_dump) { | 857 | if (!ha->fw_dump) { |
@@ -855,7 +866,6 @@ cont_alloc: | |||
855 | } | 866 | } |
856 | return; | 867 | return; |
857 | } | 868 | } |
858 | |||
859 | qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", | 869 | qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", |
860 | dump_size / 1024); | 870 | dump_size / 1024); |
861 | 871 | ||
@@ -894,7 +904,7 @@ qla2x00_resize_request_q(scsi_qla_host_t *vha) | |||
894 | dma_addr_t request_dma; | 904 | dma_addr_t request_dma; |
895 | request_t *request_ring; | 905 | request_t *request_ring; |
896 | struct qla_hw_data *ha = vha->hw; | 906 | struct qla_hw_data *ha = vha->hw; |
897 | struct req_que *req = ha->req; | 907 | struct req_que *req = ha->req_q_map[0]; |
898 | 908 | ||
899 | /* Valid only on recent ISPs. */ | 909 | /* Valid only on recent ISPs. */ |
900 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | 910 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) |
@@ -1030,12 +1040,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) | |||
1030 | * | 1040 | * |
1031 | * Returns 0 on success. | 1041 | * Returns 0 on success. |
1032 | */ | 1042 | */ |
1033 | static void | 1043 | void |
1034 | qla2x00_init_response_q_entries(scsi_qla_host_t *vha) | 1044 | qla2x00_init_response_q_entries(struct rsp_que *rsp) |
1035 | { | 1045 | { |
1036 | uint16_t cnt; | 1046 | uint16_t cnt; |
1037 | response_t *pkt; | 1047 | response_t *pkt; |
1038 | struct rsp_que *rsp = vha->hw->rsp; | ||
1039 | 1048 | ||
1040 | pkt = rsp->ring_ptr; | 1049 | pkt = rsp->ring_ptr; |
1041 | for (cnt = 0; cnt < rsp->length; cnt++) { | 1050 | for (cnt = 0; cnt < rsp->length; cnt++) { |
@@ -1151,8 +1160,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha) | |||
1151 | { | 1160 | { |
1152 | struct qla_hw_data *ha = vha->hw; | 1161 | struct qla_hw_data *ha = vha->hw; |
1153 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 1162 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
1154 | struct req_que *req = ha->req; | 1163 | struct req_que *req = ha->req_q_map[0]; |
1155 | struct rsp_que *rsp = ha->rsp; | 1164 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
1156 | 1165 | ||
1157 | /* Setup ring parameters in initialization control block. */ | 1166 | /* Setup ring parameters in initialization control block. */ |
1158 | ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); | 1167 | ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); |
@@ -1175,12 +1184,15 @@ void | |||
1175 | qla24xx_config_rings(struct scsi_qla_host *vha) | 1184 | qla24xx_config_rings(struct scsi_qla_host *vha) |
1176 | { | 1185 | { |
1177 | struct qla_hw_data *ha = vha->hw; | 1186 | struct qla_hw_data *ha = vha->hw; |
1178 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 1187 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0); |
1188 | struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; | ||
1189 | struct qla_msix_entry *msix; | ||
1179 | struct init_cb_24xx *icb; | 1190 | struct init_cb_24xx *icb; |
1180 | struct req_que *req = ha->req; | 1191 | uint16_t rid = 0; |
1181 | struct rsp_que *rsp = ha->rsp; | 1192 | struct req_que *req = ha->req_q_map[0]; |
1193 | struct rsp_que *rsp = ha->rsp_q_map[0]; | ||
1182 | 1194 | ||
1183 | /* Setup ring parameters in initialization control block. */ | 1195 | /* Setup ring parameters in initialization control block. */ |
1184 | icb = (struct init_cb_24xx *)ha->init_cb; | 1196 | icb = (struct init_cb_24xx *)ha->init_cb; |
1185 | icb->request_q_outpointer = __constant_cpu_to_le16(0); | 1197 | icb->request_q_outpointer = __constant_cpu_to_le16(0); |
1186 | icb->response_q_inpointer = __constant_cpu_to_le16(0); | 1198 | icb->response_q_inpointer = __constant_cpu_to_le16(0); |
@@ -1191,11 +1203,40 @@ qla24xx_config_rings(struct scsi_qla_host *vha) | |||
1191 | icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); | 1203 | icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); |
1192 | icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); | 1204 | icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); |
1193 | 1205 | ||
1194 | WRT_REG_DWORD(®->req_q_in, 0); | 1206 | if (ha->mqenable) { |
1195 | WRT_REG_DWORD(®->req_q_out, 0); | 1207 | icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); |
1196 | WRT_REG_DWORD(®->rsp_q_in, 0); | 1208 | icb->rid = __constant_cpu_to_le16(rid); |
1197 | WRT_REG_DWORD(®->rsp_q_out, 0); | 1209 | if (ha->flags.msix_enabled) { |
1198 | RD_REG_DWORD(®->rsp_q_out); | 1210 | msix = &ha->msix_entries[1]; |
1211 | DEBUG2_17(printk(KERN_INFO | ||
1212 | "Reistering vector 0x%x for base que\n", msix->entry)); | ||
1213 | icb->msix = cpu_to_le16(msix->entry); | ||
1214 | } | ||
1215 | /* Use alternate PCI bus number */ | ||
1216 | if (MSB(rid)) | ||
1217 | icb->firmware_options_2 |= | ||
1218 | __constant_cpu_to_le32(BIT_19); | ||
1219 | /* Use alternate PCI devfn */ | ||
1220 | if (LSB(rid)) | ||
1221 | icb->firmware_options_2 |= | ||
1222 | __constant_cpu_to_le32(BIT_18); | ||
1223 | |||
1224 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); | ||
1225 | icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); | ||
1226 | ha->rsp_q_map[0]->options = icb->firmware_options_2; | ||
1227 | |||
1228 | WRT_REG_DWORD(®->isp25mq.req_q_in, 0); | ||
1229 | WRT_REG_DWORD(®->isp25mq.req_q_out, 0); | ||
1230 | WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0); | ||
1231 | WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0); | ||
1232 | } else { | ||
1233 | WRT_REG_DWORD(®->isp24.req_q_in, 0); | ||
1234 | WRT_REG_DWORD(®->isp24.req_q_out, 0); | ||
1235 | WRT_REG_DWORD(®->isp24.rsp_q_in, 0); | ||
1236 | WRT_REG_DWORD(®->isp24.rsp_q_out, 0); | ||
1237 | } | ||
1238 | /* PCI posting */ | ||
1239 | RD_REG_DWORD(&ioreg->hccr); | ||
1199 | } | 1240 | } |
1200 | 1241 | ||
1201 | /** | 1242 | /** |
@@ -1214,8 +1255,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1214 | unsigned long flags = 0; | 1255 | unsigned long flags = 0; |
1215 | int cnt; | 1256 | int cnt; |
1216 | struct qla_hw_data *ha = vha->hw; | 1257 | struct qla_hw_data *ha = vha->hw; |
1217 | struct req_que *req = ha->req; | 1258 | struct req_que *req = ha->req_q_map[0]; |
1218 | struct rsp_que *rsp = ha->rsp; | 1259 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
1219 | struct mid_init_cb_24xx *mid_init_cb = | 1260 | struct mid_init_cb_24xx *mid_init_cb = |
1220 | (struct mid_init_cb_24xx *) ha->init_cb; | 1261 | (struct mid_init_cb_24xx *) ha->init_cb; |
1221 | 1262 | ||
@@ -1239,7 +1280,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
1239 | rsp->ring_index = 0; | 1280 | rsp->ring_index = 0; |
1240 | 1281 | ||
1241 | /* Initialize response queue entries */ | 1282 | /* Initialize response queue entries */ |
1242 | qla2x00_init_response_q_entries(vha); | 1283 | qla2x00_init_response_q_entries(rsp); |
1243 | 1284 | ||
1244 | ha->isp_ops->config_rings(vha); | 1285 | ha->isp_ops->config_rings(vha); |
1245 | 1286 | ||
@@ -2039,10 +2080,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
2039 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { | 2080 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { |
2040 | if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) | 2081 | if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) |
2041 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | 2082 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); |
2042 | if (test_bit(RSCN_UPDATE, &save_flags)) { | 2083 | if (test_bit(RSCN_UPDATE, &save_flags)) |
2043 | set_bit(RSCN_UPDATE, &vha->dpc_flags); | 2084 | set_bit(RSCN_UPDATE, &vha->dpc_flags); |
2044 | vha->flags.rscn_queue_overflow = 1; | ||
2045 | } | ||
2046 | } | 2085 | } |
2047 | 2086 | ||
2048 | return (rval); | 2087 | return (rval); |
@@ -3169,10 +3208,11 @@ qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
3169 | int | 3208 | int |
3170 | qla2x00_loop_resync(scsi_qla_host_t *vha) | 3209 | qla2x00_loop_resync(scsi_qla_host_t *vha) |
3171 | { | 3210 | { |
3172 | int rval; | 3211 | int rval = QLA_SUCCESS; |
3173 | uint32_t wait_time; | 3212 | uint32_t wait_time; |
3174 | 3213 | struct qla_hw_data *ha = vha->hw; | |
3175 | rval = QLA_SUCCESS; | 3214 | struct req_que *req = ha->req_q_map[0]; |
3215 | struct rsp_que *rsp = ha->rsp_q_map[0]; | ||
3176 | 3216 | ||
3177 | atomic_set(&vha->loop_state, LOOP_UPDATE); | 3217 | atomic_set(&vha->loop_state, LOOP_UPDATE); |
3178 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); | 3218 | clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); |
@@ -3184,7 +3224,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) | |||
3184 | atomic_set(&vha->loop_state, LOOP_UPDATE); | 3224 | atomic_set(&vha->loop_state, LOOP_UPDATE); |
3185 | 3225 | ||
3186 | /* Issue a marker after FW becomes ready. */ | 3226 | /* Issue a marker after FW becomes ready. */ |
3187 | qla2x00_marker(vha, 0, 0, MK_SYNC_ALL); | 3227 | qla2x00_marker(vha, req, rsp, 0, 0, |
3228 | MK_SYNC_ALL); | ||
3188 | vha->marker_needed = 0; | 3229 | vha->marker_needed = 0; |
3189 | 3230 | ||
3190 | /* Remap devices on Loop. */ | 3231 | /* Remap devices on Loop. */ |
@@ -3237,6 +3278,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3237 | uint8_t status = 0; | 3278 | uint8_t status = 0; |
3238 | struct qla_hw_data *ha = vha->hw; | 3279 | struct qla_hw_data *ha = vha->hw; |
3239 | struct scsi_qla_host *vp; | 3280 | struct scsi_qla_host *vp; |
3281 | struct req_que *req = ha->req_q_map[0]; | ||
3240 | 3282 | ||
3241 | if (vha->flags.online) { | 3283 | if (vha->flags.online) { |
3242 | vha->flags.online = 0; | 3284 | vha->flags.online = 0; |
@@ -3262,7 +3304,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) | |||
3262 | /* Requeue all commands in outstanding command list. */ | 3304 | /* Requeue all commands in outstanding command list. */ |
3263 | qla2x00_abort_all_cmds(vha, DID_RESET << 16); | 3305 | qla2x00_abort_all_cmds(vha, DID_RESET << 16); |
3264 | 3306 | ||
3265 | ha->isp_ops->get_flash_version(vha, ha->req->ring); | 3307 | ha->isp_ops->get_flash_version(vha, req->ring); |
3266 | 3308 | ||
3267 | ha->isp_ops->nvram_config(vha); | 3309 | ha->isp_ops->nvram_config(vha); |
3268 | 3310 | ||
@@ -3376,6 +3418,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
3376 | uint8_t status = 0; | 3418 | uint8_t status = 0; |
3377 | uint32_t wait_time; | 3419 | uint32_t wait_time; |
3378 | struct qla_hw_data *ha = vha->hw; | 3420 | struct qla_hw_data *ha = vha->hw; |
3421 | struct req_que *req = ha->req_q_map[0]; | ||
3422 | struct rsp_que *rsp = ha->rsp_q_map[0]; | ||
3379 | 3423 | ||
3380 | /* If firmware needs to be loaded */ | 3424 | /* If firmware needs to be loaded */ |
3381 | if (qla2x00_isp_firmware(vha)) { | 3425 | if (qla2x00_isp_firmware(vha)) { |
@@ -3387,13 +3431,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
3387 | 3431 | ||
3388 | if (!status && !(status = qla2x00_init_rings(vha))) { | 3432 | if (!status && !(status = qla2x00_init_rings(vha))) { |
3389 | clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); | 3433 | clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); |
3434 | /* Initialize the queues in use */ | ||
3435 | qla25xx_init_queues(ha); | ||
3436 | |||
3390 | status = qla2x00_fw_ready(vha); | 3437 | status = qla2x00_fw_ready(vha); |
3391 | if (!status) { | 3438 | if (!status) { |
3392 | DEBUG(printk("%s(): Start configure loop, " | 3439 | DEBUG(printk("%s(): Start configure loop, " |
3393 | "status = %d\n", __func__, status)); | 3440 | "status = %d\n", __func__, status)); |
3394 | 3441 | ||
3395 | /* Issue a marker after FW becomes ready. */ | 3442 | /* Issue a marker after FW becomes ready. */ |
3396 | qla2x00_marker(vha, 0, 0, MK_SYNC_ALL); | 3443 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); |
3397 | 3444 | ||
3398 | vha->flags.online = 1; | 3445 | vha->flags.online = 1; |
3399 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ | 3446 | /* Wait at most MAX_TARGET RSCNs for a stable link. */ |
@@ -3419,6 +3466,46 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) | |||
3419 | return (status); | 3466 | return (status); |
3420 | } | 3467 | } |
3421 | 3468 | ||
3469 | static int | ||
3470 | qla25xx_init_queues(struct qla_hw_data *ha) | ||
3471 | { | ||
3472 | struct rsp_que *rsp = NULL; | ||
3473 | struct req_que *req = NULL; | ||
3474 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
3475 | int ret = -1; | ||
3476 | int i; | ||
3477 | |||
3478 | for (i = 1; i < ha->max_queues; i++) { | ||
3479 | rsp = ha->rsp_q_map[i]; | ||
3480 | if (rsp) { | ||
3481 | rsp->options &= ~BIT_0; | ||
3482 | ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options); | ||
3483 | if (ret != QLA_SUCCESS) | ||
3484 | DEBUG2_17(printk(KERN_WARNING | ||
3485 | "%s Rsp que:%d init failed\n", __func__, | ||
3486 | rsp->id)); | ||
3487 | else | ||
3488 | DEBUG2_17(printk(KERN_INFO | ||
3489 | "%s Rsp que:%d inited\n", __func__, | ||
3490 | rsp->id)); | ||
3491 | } | ||
3492 | req = ha->req_q_map[i]; | ||
3493 | if (req) { | ||
3494 | req->options &= ~BIT_0; | ||
3495 | ret = qla25xx_init_req_que(base_vha, req, req->options); | ||
3496 | if (ret != QLA_SUCCESS) | ||
3497 | DEBUG2_17(printk(KERN_WARNING | ||
3498 | "%s Req que:%d init failed\n", __func__, | ||
3499 | req->id)); | ||
3500 | else | ||
3501 | DEBUG2_17(printk(KERN_INFO | ||
3502 | "%s Req que:%d inited\n", __func__, | ||
3503 | req->id)); | ||
3504 | } | ||
3505 | } | ||
3506 | return ret; | ||
3507 | } | ||
3508 | |||
3422 | /* | 3509 | /* |
3423 | * qla2x00_reset_adapter | 3510 | * qla2x00_reset_adapter |
3424 | * Reset adapter. | 3511 | * Reset adapter. |
@@ -3736,7 +3823,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) | |||
3736 | static int | 3823 | static int |
3737 | qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) | 3824 | qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) |
3738 | { | 3825 | { |
3739 | int rval; | 3826 | int rval = QLA_SUCCESS; |
3740 | int segments, fragment; | 3827 | int segments, fragment; |
3741 | uint32_t faddr; | 3828 | uint32_t faddr; |
3742 | uint32_t *dcode, dlen; | 3829 | uint32_t *dcode, dlen; |
@@ -3744,11 +3831,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3744 | uint32_t risc_size; | 3831 | uint32_t risc_size; |
3745 | uint32_t i; | 3832 | uint32_t i; |
3746 | struct qla_hw_data *ha = vha->hw; | 3833 | struct qla_hw_data *ha = vha->hw; |
3834 | struct req_que *req = ha->req_q_map[0]; | ||
3747 | rval = QLA_SUCCESS; | 3835 | rval = QLA_SUCCESS; |
3748 | 3836 | ||
3749 | segments = FA_RISC_CODE_SEGMENTS; | 3837 | segments = FA_RISC_CODE_SEGMENTS; |
3750 | faddr = ha->flt_region_fw; | 3838 | faddr = ha->flt_region_fw; |
3751 | dcode = (uint32_t *)ha->req->ring; | 3839 | dcode = (uint32_t *)req->ring; |
3752 | *srisc_addr = 0; | 3840 | *srisc_addr = 0; |
3753 | 3841 | ||
3754 | /* Validate firmware image by checking version. */ | 3842 | /* Validate firmware image by checking version. */ |
@@ -3790,7 +3878,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3790 | for (i = 0; i < dlen; i++) | 3878 | for (i = 0; i < dlen; i++) |
3791 | dcode[i] = swab32(dcode[i]); | 3879 | dcode[i] = swab32(dcode[i]); |
3792 | 3880 | ||
3793 | rval = qla2x00_load_ram(vha, ha->req->dma, risc_addr, | 3881 | rval = qla2x00_load_ram(vha, req->dma, risc_addr, |
3794 | dlen); | 3882 | dlen); |
3795 | if (rval) { | 3883 | if (rval) { |
3796 | DEBUG(printk("scsi(%ld):[ERROR] Failed to load " | 3884 | DEBUG(printk("scsi(%ld):[ERROR] Failed to load " |
@@ -3826,6 +3914,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3826 | uint32_t risc_addr, risc_size, fwclen, wlen, *seg; | 3914 | uint32_t risc_addr, risc_size, fwclen, wlen, *seg; |
3827 | struct fw_blob *blob; | 3915 | struct fw_blob *blob; |
3828 | struct qla_hw_data *ha = vha->hw; | 3916 | struct qla_hw_data *ha = vha->hw; |
3917 | struct req_que *req = ha->req_q_map[0]; | ||
3829 | 3918 | ||
3830 | /* Load firmware blob. */ | 3919 | /* Load firmware blob. */ |
3831 | blob = qla2x00_request_firmware(vha); | 3920 | blob = qla2x00_request_firmware(vha); |
@@ -3838,7 +3927,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3838 | 3927 | ||
3839 | rval = QLA_SUCCESS; | 3928 | rval = QLA_SUCCESS; |
3840 | 3929 | ||
3841 | wcode = (uint16_t *)ha->req->ring; | 3930 | wcode = (uint16_t *)req->ring; |
3842 | *srisc_addr = 0; | 3931 | *srisc_addr = 0; |
3843 | fwcode = (uint16_t *)blob->fw->data; | 3932 | fwcode = (uint16_t *)blob->fw->data; |
3844 | fwclen = 0; | 3933 | fwclen = 0; |
@@ -3891,7 +3980,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3891 | for (i = 0; i < wlen; i++) | 3980 | for (i = 0; i < wlen; i++) |
3892 | wcode[i] = swab16(fwcode[i]); | 3981 | wcode[i] = swab16(fwcode[i]); |
3893 | 3982 | ||
3894 | rval = qla2x00_load_ram(vha, ha->req->dma, risc_addr, | 3983 | rval = qla2x00_load_ram(vha, req->dma, risc_addr, |
3895 | wlen); | 3984 | wlen); |
3896 | if (rval) { | 3985 | if (rval) { |
3897 | DEBUG(printk("scsi(%ld):[ERROR] Failed to load " | 3986 | DEBUG(printk("scsi(%ld):[ERROR] Failed to load " |
@@ -3930,6 +4019,7 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3930 | struct fw_blob *blob; | 4019 | struct fw_blob *blob; |
3931 | uint32_t *fwcode, fwclen; | 4020 | uint32_t *fwcode, fwclen; |
3932 | struct qla_hw_data *ha = vha->hw; | 4021 | struct qla_hw_data *ha = vha->hw; |
4022 | struct req_que *req = ha->req_q_map[0]; | ||
3933 | 4023 | ||
3934 | /* Load firmware blob. */ | 4024 | /* Load firmware blob. */ |
3935 | blob = qla2x00_request_firmware(vha); | 4025 | blob = qla2x00_request_firmware(vha); |
@@ -3947,7 +4037,7 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
3947 | rval = QLA_SUCCESS; | 4037 | rval = QLA_SUCCESS; |
3948 | 4038 | ||
3949 | segments = FA_RISC_CODE_SEGMENTS; | 4039 | segments = FA_RISC_CODE_SEGMENTS; |
3950 | dcode = (uint32_t *)ha->req->ring; | 4040 | dcode = (uint32_t *)req->ring; |
3951 | *srisc_addr = 0; | 4041 | *srisc_addr = 0; |
3952 | fwcode = (uint32_t *)blob->fw->data; | 4042 | fwcode = (uint32_t *)blob->fw->data; |
3953 | fwclen = 0; | 4043 | fwclen = 0; |
@@ -4001,7 +4091,7 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) | |||
4001 | for (i = 0; i < dlen; i++) | 4091 | for (i = 0; i < dlen; i++) |
4002 | dcode[i] = swab32(fwcode[i]); | 4092 | dcode[i] = swab32(fwcode[i]); |
4003 | 4093 | ||
4004 | rval = qla2x00_load_ram(vha, ha->req->dma, risc_addr, | 4094 | rval = qla2x00_load_ram(vha, req->dma, risc_addr, |
4005 | dlen); | 4095 | dlen); |
4006 | if (rval) { | 4096 | if (rval) { |
4007 | DEBUG(printk("scsi(%ld):[ERROR] Failed to load " | 4097 | DEBUG(printk("scsi(%ld):[ERROR] Failed to load " |
@@ -4060,6 +4150,8 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) | |||
4060 | uint16_t mb[MAILBOX_REGISTER_COUNT]; | 4150 | uint16_t mb[MAILBOX_REGISTER_COUNT]; |
4061 | struct qla_hw_data *ha = vha->hw; | 4151 | struct qla_hw_data *ha = vha->hw; |
4062 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | 4152 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); |
4153 | struct req_que *req = ha->req_q_map[0]; | ||
4154 | struct rsp_que *rsp = ha->rsp_q_map[0]; | ||
4063 | 4155 | ||
4064 | if (!vha->vp_idx) | 4156 | if (!vha->vp_idx) |
4065 | return -EINVAL; | 4157 | return -EINVAL; |
@@ -4067,7 +4159,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) | |||
4067 | rval = qla2x00_fw_ready(base_vha); | 4159 | rval = qla2x00_fw_ready(base_vha); |
4068 | if (rval == QLA_SUCCESS) { | 4160 | if (rval == QLA_SUCCESS) { |
4069 | clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); | 4161 | clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); |
4070 | qla2x00_marker(vha, 0, 0, MK_SYNC_ALL); | 4162 | qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); |
4071 | } | 4163 | } |
4072 | 4164 | ||
4073 | vha->flags.management_server_logged_in = 0; | 4165 | vha->flags.management_server_logged_in = 0; |
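The qla_init.c hunks above all follow one pattern: the old single ha->req / ha->rsp pointers give way to entry 0 of the new per-HA queue maps, and that base queue pair is passed explicitly to qla2x00_marker(). A minimal sketch of the resulting call pattern, assuming only the structures visible in this patch (illustrative, not part of the commit):

	/* Sketch only: issue a MK_SYNC_ALL marker on the base (index 0)
	 * request/response queue pair, the way the reworked
	 * qla2x00_loop_resync() and qla2x00_restart_isp() now do. */
	static int issue_base_marker(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;
		struct req_que *req = ha->req_q_map[0];	/* base request queue */
		struct rsp_que *rsp = ha->rsp_q_map[0];	/* base response queue */

		return qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
	}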
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 8ce354720680..507a6e954f5c 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
@@ -41,32 +41,6 @@ qla2x00_poll(struct rsp_que *rsp) | |||
41 | local_irq_restore(flags); | 41 | local_irq_restore(flags); |
42 | } | 42 | } |
43 | 43 | ||
44 | /** | ||
45 | * qla2x00_issue_marker() - Issue a Marker IOCB if necessary. | ||
46 | * @ha: HA context | ||
47 | * @ha_locked: is function called with the hardware lock | ||
48 | * | ||
49 | * Returns non-zero if a failure occurred, else zero. | ||
50 | */ | ||
51 | static inline int | ||
52 | qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) | ||
53 | { | ||
54 | /* Send marker if required */ | ||
55 | if (vha->marker_needed != 0) { | ||
56 | if (ha_locked) { | ||
57 | if (__qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != | ||
58 | QLA_SUCCESS) | ||
59 | return (QLA_FUNCTION_FAILED); | ||
60 | } else { | ||
61 | if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != | ||
62 | QLA_SUCCESS) | ||
63 | return (QLA_FUNCTION_FAILED); | ||
64 | } | ||
65 | vha->marker_needed = 0; | ||
66 | } | ||
67 | return (QLA_SUCCESS); | ||
68 | } | ||
69 | |||
70 | static inline uint8_t * | 44 | static inline uint8_t * |
71 | host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) | 45 | host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) |
72 | { | 46 | { |
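The qla_inline.h hunk removes the qla2x00_issue_marker() helper outright, since its body was written against the old single-queue marker signature. The patch does not add a replacement; if a caller still wanted the same convenience against the new interface, it would presumably look like the following sketch (an assumption, not part of the commit):

	/* Hypothetical helper, not in this patch: same logic as the removed
	 * qla2x00_issue_marker(), rewritten against the marker routines that
	 * now take an explicit request/response queue pair. */
	static inline int
	qla2x00_issue_marker(scsi_qla_host_t *vha, struct req_que *req,
	    struct rsp_que *rsp, int ha_locked)
	{
		if (vha->marker_needed != 0) {
			int ret = ha_locked ?
			    __qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) :
			    qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
			if (ret != QLA_SUCCESS)
				return QLA_FUNCTION_FAILED;
			vha->marker_needed = 0;
		}
		return QLA_SUCCESS;
	}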
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 0c145c9e0cd9..6d2bd97c3b11 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -11,8 +11,9 @@ | |||
11 | 11 | ||
12 | #include <scsi/scsi_tcq.h> | 12 | #include <scsi/scsi_tcq.h> |
13 | 13 | ||
14 | static request_t *qla2x00_req_pkt(scsi_qla_host_t *); | 14 | static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *, |
15 | static void qla2x00_isp_cmd(scsi_qla_host_t *); | 15 | struct rsp_que *rsp); |
16 | static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); | ||
16 | 17 | ||
17 | /** | 18 | /** |
18 | * qla2x00_get_cmd_direction() - Determine control_flag data direction. | 19 | * qla2x00_get_cmd_direction() - Determine control_flag data direction. |
@@ -91,10 +92,9 @@ qla2x00_calc_iocbs_64(uint16_t dsds) | |||
91 | * Returns a pointer to the Continuation Type 0 IOCB packet. | 92 | * Returns a pointer to the Continuation Type 0 IOCB packet. |
92 | */ | 93 | */ |
93 | static inline cont_entry_t * | 94 | static inline cont_entry_t * |
94 | qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha) | 95 | qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) |
95 | { | 96 | { |
96 | cont_entry_t *cont_pkt; | 97 | cont_entry_t *cont_pkt; |
97 | struct req_que *req = vha->hw->req; | ||
98 | /* Adjust ring index. */ | 98 | /* Adjust ring index. */ |
99 | req->ring_index++; | 99 | req->ring_index++; |
100 | if (req->ring_index == req->length) { | 100 | if (req->ring_index == req->length) { |
@@ -120,10 +120,9 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha) | |||
120 | * Returns a pointer to the continuation type 1 IOCB packet. | 120 | * Returns a pointer to the continuation type 1 IOCB packet. |
121 | */ | 121 | */ |
122 | static inline cont_a64_entry_t * | 122 | static inline cont_a64_entry_t * |
123 | qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha) | 123 | qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) |
124 | { | 124 | { |
125 | cont_a64_entry_t *cont_pkt; | 125 | cont_a64_entry_t *cont_pkt; |
126 | struct req_que *req = vha->hw->req; | ||
127 | 126 | ||
128 | /* Adjust ring index. */ | 127 | /* Adjust ring index. */ |
129 | req->ring_index++; | 128 | req->ring_index++; |
@@ -160,6 +159,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
160 | struct scsi_cmnd *cmd; | 159 | struct scsi_cmnd *cmd; |
161 | struct scatterlist *sg; | 160 | struct scatterlist *sg; |
162 | int i; | 161 | int i; |
162 | struct req_que *req; | ||
163 | uint16_t que_id; | ||
163 | 164 | ||
164 | cmd = sp->cmd; | 165 | cmd = sp->cmd; |
165 | 166 | ||
@@ -174,6 +175,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
174 | } | 175 | } |
175 | 176 | ||
176 | vha = sp->vha; | 177 | vha = sp->vha; |
178 | que_id = vha->req_ques[0]; | ||
179 | req = vha->hw->req_q_map[que_id]; | ||
177 | 180 | ||
178 | cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); | 181 | cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); |
179 | 182 | ||
@@ -191,7 +194,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
191 | * Seven DSDs are available in the Continuation | 194 | * Seven DSDs are available in the Continuation |
192 | * Type 0 IOCB. | 195 | * Type 0 IOCB. |
193 | */ | 196 | */ |
194 | cont_pkt = qla2x00_prep_cont_type0_iocb(vha); | 197 | cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); |
195 | cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; | 198 | cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; |
196 | avail_dsds = 7; | 199 | avail_dsds = 7; |
197 | } | 200 | } |
@@ -219,6 +222,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
219 | struct scsi_cmnd *cmd; | 222 | struct scsi_cmnd *cmd; |
220 | struct scatterlist *sg; | 223 | struct scatterlist *sg; |
221 | int i; | 224 | int i; |
225 | struct req_que *req; | ||
226 | uint16_t que_id; | ||
222 | 227 | ||
223 | cmd = sp->cmd; | 228 | cmd = sp->cmd; |
224 | 229 | ||
@@ -233,6 +238,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
233 | } | 238 | } |
234 | 239 | ||
235 | vha = sp->vha; | 240 | vha = sp->vha; |
241 | que_id = vha->req_ques[0]; | ||
242 | req = vha->hw->req_q_map[que_id]; | ||
236 | 243 | ||
237 | cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); | 244 | cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); |
238 | 245 | ||
@@ -251,7 +258,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, | |||
251 | * Five DSDs are available in the Continuation | 258 | * Five DSDs are available in the Continuation |
252 | * Type 1 IOCB. | 259 | * Type 1 IOCB. |
253 | */ | 260 | */ |
254 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); | 261 | cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); |
255 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; | 262 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; |
256 | avail_dsds = 5; | 263 | avail_dsds = 5; |
257 | } | 264 | } |
@@ -287,6 +294,7 @@ qla2x00_start_scsi(srb_t *sp) | |||
287 | struct device_reg_2xxx __iomem *reg; | 294 | struct device_reg_2xxx __iomem *reg; |
288 | struct qla_hw_data *ha; | 295 | struct qla_hw_data *ha; |
289 | struct req_que *req; | 296 | struct req_que *req; |
297 | struct rsp_que *rsp; | ||
290 | 298 | ||
291 | /* Setup device pointers. */ | 299 | /* Setup device pointers. */ |
292 | ret = 0; | 300 | ret = 0; |
@@ -294,13 +302,15 @@ qla2x00_start_scsi(srb_t *sp) | |||
294 | ha = vha->hw; | 302 | ha = vha->hw; |
295 | reg = &ha->iobase->isp; | 303 | reg = &ha->iobase->isp; |
296 | cmd = sp->cmd; | 304 | cmd = sp->cmd; |
297 | req = ha->req; | 305 | req = ha->req_q_map[0]; |
306 | rsp = ha->rsp_q_map[0]; | ||
298 | /* So we know we haven't pci_map'ed anything yet */ | 307 | /* So we know we haven't pci_map'ed anything yet */ |
299 | tot_dsds = 0; | 308 | tot_dsds = 0; |
300 | 309 | ||
301 | /* Send marker if required */ | 310 | /* Send marker if required */ |
302 | if (vha->marker_needed != 0) { | 311 | if (vha->marker_needed != 0) { |
303 | if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) | 312 | if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) |
313 | != QLA_SUCCESS) | ||
304 | return (QLA_FUNCTION_FAILED); | 314 | return (QLA_FUNCTION_FAILED); |
305 | vha->marker_needed = 0; | 315 | vha->marker_needed = 0; |
306 | } | 316 | } |
@@ -392,8 +402,8 @@ qla2x00_start_scsi(srb_t *sp) | |||
392 | 402 | ||
393 | /* Manage unprocessed RIO/ZIO commands in response queue. */ | 403 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
394 | if (vha->flags.process_response_queue && | 404 | if (vha->flags.process_response_queue && |
395 | ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED) | 405 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
396 | qla2x00_process_response_queue(vha); | 406 | qla2x00_process_response_queue(rsp); |
397 | 407 | ||
398 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 408 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
399 | return (QLA_SUCCESS); | 409 | return (QLA_SUCCESS); |
@@ -419,8 +429,9 @@ queuing_error: | |||
419 | * Returns non-zero if a failure occurred, else zero. | 429 | * Returns non-zero if a failure occurred, else zero. |
420 | */ | 430 | */ |
421 | int | 431 | int |
422 | __qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun, | 432 | __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, |
423 | uint8_t type) | 433 | struct rsp_que *rsp, uint16_t loop_id, |
434 | uint16_t lun, uint8_t type) | ||
424 | { | 435 | { |
425 | mrk_entry_t *mrk; | 436 | mrk_entry_t *mrk; |
426 | struct mrk_entry_24xx *mrk24; | 437 | struct mrk_entry_24xx *mrk24; |
@@ -428,7 +439,7 @@ __qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun, | |||
428 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 439 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
429 | 440 | ||
430 | mrk24 = NULL; | 441 | mrk24 = NULL; |
431 | mrk = (mrk_entry_t *)qla2x00_req_pkt(base_vha); | 442 | mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp); |
432 | if (mrk == NULL) { | 443 | if (mrk == NULL) { |
433 | DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", | 444 | DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", |
434 | __func__, base_vha->host_no)); | 445 | __func__, base_vha->host_no)); |
@@ -453,22 +464,22 @@ __qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun, | |||
453 | } | 464 | } |
454 | wmb(); | 465 | wmb(); |
455 | 466 | ||
456 | qla2x00_isp_cmd(base_vha); | 467 | qla2x00_isp_cmd(vha, req); |
457 | 468 | ||
458 | return (QLA_SUCCESS); | 469 | return (QLA_SUCCESS); |
459 | } | 470 | } |
460 | 471 | ||
461 | int | 472 | int |
462 | qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun, | 473 | qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, |
463 | uint8_t type) | 474 | struct rsp_que *rsp, uint16_t loop_id, uint16_t lun, |
475 | uint8_t type) | ||
464 | { | 476 | { |
465 | int ret; | 477 | int ret; |
466 | unsigned long flags = 0; | 478 | unsigned long flags = 0; |
467 | struct qla_hw_data *ha = vha->hw; | ||
468 | 479 | ||
469 | spin_lock_irqsave(&ha->hardware_lock, flags); | 480 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); |
470 | ret = __qla2x00_marker(vha, loop_id, lun, type); | 481 | ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type); |
471 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 482 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); |
472 | 483 | ||
473 | return (ret); | 484 | return (ret); |
474 | } | 485 | } |
@@ -482,27 +493,32 @@ qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun, | |||
482 | * Returns NULL if function failed, else, a pointer to the request packet. | 493 | * Returns NULL if function failed, else, a pointer to the request packet. |
483 | */ | 494 | */ |
484 | static request_t * | 495 | static request_t * |
485 | qla2x00_req_pkt(scsi_qla_host_t *vha) | 496 | qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req, |
497 | struct rsp_que *rsp) | ||
486 | { | 498 | { |
487 | struct qla_hw_data *ha = vha->hw; | 499 | struct qla_hw_data *ha = vha->hw; |
488 | device_reg_t __iomem *reg = ha->iobase; | 500 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); |
489 | request_t *pkt = NULL; | 501 | request_t *pkt = NULL; |
490 | uint16_t cnt; | 502 | uint16_t cnt; |
491 | uint32_t *dword_ptr; | 503 | uint32_t *dword_ptr; |
492 | uint32_t timer; | 504 | uint32_t timer; |
493 | uint16_t req_cnt = 1; | 505 | uint16_t req_cnt = 1; |
494 | struct req_que *req = ha->req; | ||
495 | 506 | ||
496 | /* Wait 1 second for slot. */ | 507 | /* Wait 1 second for slot. */ |
497 | for (timer = HZ; timer; timer--) { | 508 | for (timer = HZ; timer; timer--) { |
498 | if ((req_cnt + 2) >= req->cnt) { | 509 | if ((req_cnt + 2) >= req->cnt) { |
499 | /* Calculate number of free request entries. */ | 510 | /* Calculate number of free request entries. */ |
500 | if (IS_FWI2_CAPABLE(ha)) | 511 | if (ha->mqenable) |
501 | cnt = (uint16_t)RD_REG_DWORD( | 512 | cnt = (uint16_t) |
502 | ®->isp24.req_q_out); | 513 | RD_REG_DWORD(®->isp25mq.req_q_out); |
503 | else | 514 | else { |
504 | cnt = qla2x00_debounce_register( | 515 | if (IS_FWI2_CAPABLE(ha)) |
505 | ISP_REQ_Q_OUT(ha, ®->isp)); | 516 | cnt = (uint16_t)RD_REG_DWORD( |
517 | ®->isp24.req_q_out); | ||
518 | else | ||
519 | cnt = qla2x00_debounce_register( | ||
520 | ISP_REQ_Q_OUT(ha, ®->isp)); | ||
521 | } | ||
506 | if (req->ring_index < cnt) | 522 | if (req->ring_index < cnt) |
507 | req->cnt = cnt - req->ring_index; | 523 | req->cnt = cnt - req->ring_index; |
508 | else | 524 | else |
@@ -536,7 +552,7 @@ qla2x00_req_pkt(scsi_qla_host_t *vha) | |||
536 | /* Check for pending interrupts. */ | 552 | /* Check for pending interrupts. */ |
537 | /* During init we issue marker directly */ | 553 | /* During init we issue marker directly */ |
538 | if (!vha->marker_needed && !vha->flags.init_done) | 554 | if (!vha->marker_needed && !vha->flags.init_done) |
539 | qla2x00_poll(ha->rsp); | 555 | qla2x00_poll(rsp); |
540 | spin_lock_irq(&ha->hardware_lock); | 556 | spin_lock_irq(&ha->hardware_lock); |
541 | } | 557 | } |
542 | if (!pkt) { | 558 | if (!pkt) { |
@@ -553,11 +569,10 @@ qla2x00_req_pkt(scsi_qla_host_t *vha) | |||
553 | * Note: The caller must hold the hardware lock before calling this routine. | 569 | * Note: The caller must hold the hardware lock before calling this routine. |
554 | */ | 570 | */ |
555 | static void | 571 | static void |
556 | qla2x00_isp_cmd(scsi_qla_host_t *vha) | 572 | qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req) |
557 | { | 573 | { |
558 | struct qla_hw_data *ha = vha->hw; | 574 | struct qla_hw_data *ha = vha->hw; |
559 | device_reg_t __iomem *reg = ha->iobase; | 575 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); |
560 | struct req_que *req = ha->req; | ||
561 | 576 | ||
562 | DEBUG5(printk("%s(): IOCB data:\n", __func__)); | 577 | DEBUG5(printk("%s(): IOCB data:\n", __func__)); |
563 | DEBUG5(qla2x00_dump_buffer( | 578 | DEBUG5(qla2x00_dump_buffer( |
@@ -572,12 +587,17 @@ qla2x00_isp_cmd(scsi_qla_host_t *vha) | |||
572 | req->ring_ptr++; | 587 | req->ring_ptr++; |
573 | 588 | ||
574 | /* Set chip new ring index. */ | 589 | /* Set chip new ring index. */ |
575 | if (IS_FWI2_CAPABLE(ha)) { | 590 | if (ha->mqenable) |
576 | WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); | 591 | RD_REG_DWORD(®->isp25mq.req_q_out); |
577 | RD_REG_DWORD_RELAXED(®->isp24.req_q_in); | 592 | else { |
578 | } else { | 593 | if (IS_FWI2_CAPABLE(ha)) { |
579 | WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), req->ring_index); | 594 | WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); |
580 | RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); | 595 | RD_REG_DWORD_RELAXED(®->isp24.req_q_in); |
596 | } else { | ||
597 | WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), | ||
598 | req->ring_index); | ||
599 | RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); | ||
600 | } | ||
581 | } | 601 | } |
582 | 602 | ||
583 | } | 603 | } |
@@ -622,6 +642,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, | |||
622 | struct scsi_cmnd *cmd; | 642 | struct scsi_cmnd *cmd; |
623 | struct scatterlist *sg; | 643 | struct scatterlist *sg; |
624 | int i; | 644 | int i; |
645 | uint16_t que_id; | ||
646 | struct req_que *req; | ||
625 | 647 | ||
626 | cmd = sp->cmd; | 648 | cmd = sp->cmd; |
627 | 649 | ||
@@ -636,6 +658,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, | |||
636 | } | 658 | } |
637 | 659 | ||
638 | vha = sp->vha; | 660 | vha = sp->vha; |
661 | que_id = vha->req_ques[0]; | ||
662 | req = vha->hw->req_q_map[que_id]; | ||
639 | 663 | ||
640 | /* Set transfer direction */ | 664 | /* Set transfer direction */ |
641 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { | 665 | if (cmd->sc_data_direction == DMA_TO_DEVICE) { |
@@ -666,7 +690,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, | |||
666 | * Five DSDs are available in the Continuation | 690 | * Five DSDs are available in the Continuation |
667 | * Type 1 IOCB. | 691 | * Type 1 IOCB. |
668 | */ | 692 | */ |
669 | cont_pkt = qla2x00_prep_cont_type1_iocb(vha); | 693 | cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); |
670 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; | 694 | cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; |
671 | avail_dsds = 5; | 695 | avail_dsds = 5; |
672 | } | 696 | } |
@@ -691,8 +715,6 @@ qla24xx_start_scsi(srb_t *sp) | |||
691 | { | 715 | { |
692 | int ret, nseg; | 716 | int ret, nseg; |
693 | unsigned long flags; | 717 | unsigned long flags; |
694 | scsi_qla_host_t *vha; | ||
695 | struct scsi_cmnd *cmd; | ||
696 | uint32_t *clr_ptr; | 718 | uint32_t *clr_ptr; |
697 | uint32_t index; | 719 | uint32_t index; |
698 | uint32_t handle; | 720 | uint32_t handle; |
@@ -700,23 +722,32 @@ qla24xx_start_scsi(srb_t *sp) | |||
700 | uint16_t cnt; | 722 | uint16_t cnt; |
701 | uint16_t req_cnt; | 723 | uint16_t req_cnt; |
702 | uint16_t tot_dsds; | 724 | uint16_t tot_dsds; |
703 | struct device_reg_24xx __iomem *reg; | 725 | struct req_que *req = NULL; |
704 | struct qla_hw_data *ha; | 726 | struct rsp_que *rsp = NULL; |
705 | struct req_que *req; | 727 | struct scsi_cmnd *cmd = sp->cmd; |
728 | struct scsi_qla_host *vha = sp->vha; | ||
729 | struct qla_hw_data *ha = vha->hw; | ||
730 | device_reg_t __iomem *reg; | ||
731 | uint16_t que_id; | ||
706 | 732 | ||
707 | /* Setup device pointers. */ | 733 | /* Setup device pointers. */ |
708 | ret = 0; | 734 | ret = 0; |
709 | vha = sp->vha; | 735 | que_id = vha->req_ques[0]; |
710 | ha = vha->hw; | 736 | |
711 | reg = &ha->iobase->isp24; | 737 | req = ha->req_q_map[que_id]; |
712 | cmd = sp->cmd; | 738 | reg = ISP_QUE_REG(ha, req->id); |
713 | req = ha->req; | 739 | |
740 | if (req->rsp) | ||
741 | rsp = req->rsp; | ||
742 | else | ||
743 | rsp = ha->rsp_q_map[que_id]; | ||
714 | /* So we know we haven't pci_map'ed anything yet */ | 744 | /* So we know we haven't pci_map'ed anything yet */ |
715 | tot_dsds = 0; | 745 | tot_dsds = 0; |
716 | 746 | ||
717 | /* Send marker if required */ | 747 | /* Send marker if required */ |
718 | if (vha->marker_needed != 0) { | 748 | if (vha->marker_needed != 0) { |
719 | if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) | 749 | if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) |
750 | != QLA_SUCCESS) | ||
720 | return QLA_FUNCTION_FAILED; | 751 | return QLA_FUNCTION_FAILED; |
721 | vha->marker_needed = 0; | 752 | vha->marker_needed = 0; |
722 | } | 753 | } |
@@ -749,7 +780,13 @@ qla24xx_start_scsi(srb_t *sp) | |||
749 | 780 | ||
750 | req_cnt = qla24xx_calc_iocbs(tot_dsds); | 781 | req_cnt = qla24xx_calc_iocbs(tot_dsds); |
751 | if (req->cnt < (req_cnt + 2)) { | 782 | if (req->cnt < (req_cnt + 2)) { |
752 | cnt = (uint16_t)RD_REG_DWORD_RELAXED(®->req_q_out); | 783 | if (ha->mqenable) |
784 | cnt = (uint16_t) | ||
785 | RD_REG_DWORD_RELAXED(®->isp25mq.req_q_out); | ||
786 | else | ||
787 | cnt = (uint16_t) | ||
788 | RD_REG_DWORD_RELAXED(®->isp24.req_q_out); | ||
789 | |||
753 | if (req->ring_index < cnt) | 790 | if (req->ring_index < cnt) |
754 | req->cnt = cnt - req->ring_index; | 791 | req->cnt = cnt - req->ring_index; |
755 | else | 792 | else |
@@ -809,13 +846,17 @@ qla24xx_start_scsi(srb_t *sp) | |||
809 | sp->flags |= SRB_DMA_VALID; | 846 | sp->flags |= SRB_DMA_VALID; |
810 | 847 | ||
811 | /* Set chip new ring index. */ | 848 | /* Set chip new ring index. */ |
812 | WRT_REG_DWORD(®->req_q_in, req->ring_index); | 849 | if (ha->mqenable) |
813 | RD_REG_DWORD_RELAXED(®->req_q_in); /* PCI Posting. */ | 850 | WRT_REG_DWORD(®->isp25mq.req_q_in, req->ring_index); |
851 | else { | ||
852 | WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); | ||
853 | RD_REG_DWORD_RELAXED(®->isp24.req_q_in); | ||
854 | } | ||
814 | 855 | ||
815 | /* Manage unprocessed RIO/ZIO commands in response queue. */ | 856 | /* Manage unprocessed RIO/ZIO commands in response queue. */ |
816 | if (vha->flags.process_response_queue && | 857 | if (vha->flags.process_response_queue && |
817 | ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED) | 858 | rsp->ring_ptr->signature != RESPONSE_PROCESSED) |
818 | qla24xx_process_response_queue(vha); | 859 | qla24xx_process_response_queue(rsp); |
819 | 860 | ||
820 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 861 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
821 | return QLA_SUCCESS; | 862 | return QLA_SUCCESS; |
@@ -828,3 +869,4 @@ queuing_error: | |||
828 | 869 | ||
829 | return QLA_FUNCTION_FAILED; | 870 | return QLA_FUNCTION_FAILED; |
830 | } | 871 | } |
872 | |||
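The qla_iocb.c changes push queue selection into the I/O start paths: each command resolves its request queue from vha->req_ques[0] via ha->req_q_map[], and the response queue comes either from the request queue's own rsp back-pointer or from the matching rsp_q_map[] slot. Distilled into a standalone sketch (illustrative only, mirroring the logic of the reworked qla24xx_start_scsi() above):

	/* Sketch only: resolve the queue pair used for a command before any
	 * IOCBs are built, the same way qla24xx_start_scsi() now does. */
	static void resolve_cmd_queues(srb_t *sp, struct req_que **req_out,
	    struct rsp_que **rsp_out)
	{
		struct scsi_qla_host *vha = sp->vha;
		struct qla_hw_data *ha = vha->hw;
		uint16_t que_id = vha->req_ques[0];
		struct req_que *req = ha->req_q_map[que_id];

		*req_out = req;
		*rsp_out = req->rsp ? req->rsp : ha->rsp_q_map[que_id];
	}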
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 89d327117aa8..eb4b43d7697f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -10,10 +10,12 @@ | |||
10 | #include <scsi/scsi_tcq.h> | 10 | #include <scsi/scsi_tcq.h> |
11 | 11 | ||
12 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); | 12 | static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); |
13 | static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); | 13 | static void qla2x00_process_completed_request(struct scsi_qla_host *, |
14 | static void qla2x00_status_entry(scsi_qla_host_t *, void *); | 14 | struct req_que *, uint32_t); |
15 | static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); | ||
15 | static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); | 16 | static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); |
16 | static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); | 17 | static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, |
18 | sts_entry_t *); | ||
17 | static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *); | 19 | static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *); |
18 | 20 | ||
19 | /** | 21 | /** |
@@ -83,7 +85,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
83 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); | 85 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); |
84 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); | 86 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); |
85 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); | 87 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); |
86 | qla2x00_async_event(vha, mb); | 88 | qla2x00_async_event(vha, rsp, mb); |
87 | } else { | 89 | } else { |
88 | /*EMPTY*/ | 90 | /*EMPTY*/ |
89 | DEBUG2(printk("scsi(%ld): Unrecognized " | 91 | DEBUG2(printk("scsi(%ld): Unrecognized " |
@@ -94,7 +96,7 @@ qla2100_intr_handler(int irq, void *dev_id) | |||
94 | WRT_REG_WORD(®->semaphore, 0); | 96 | WRT_REG_WORD(®->semaphore, 0); |
95 | RD_REG_WORD(®->semaphore); | 97 | RD_REG_WORD(®->semaphore); |
96 | } else { | 98 | } else { |
97 | qla2x00_process_response_queue(vha); | 99 | qla2x00_process_response_queue(rsp); |
98 | 100 | ||
99 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); | 101 | WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); |
100 | RD_REG_WORD(®->hccr); | 102 | RD_REG_WORD(®->hccr); |
@@ -190,21 +192,21 @@ qla2300_intr_handler(int irq, void *dev_id) | |||
190 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); | 192 | mb[1] = RD_MAILBOX_REG(ha, reg, 1); |
191 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); | 193 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); |
192 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); | 194 | mb[3] = RD_MAILBOX_REG(ha, reg, 3); |
193 | qla2x00_async_event(vha, mb); | 195 | qla2x00_async_event(vha, rsp, mb); |
194 | break; | 196 | break; |
195 | case 0x13: | 197 | case 0x13: |
196 | qla2x00_process_response_queue(vha); | 198 | qla2x00_process_response_queue(rsp); |
197 | break; | 199 | break; |
198 | case 0x15: | 200 | case 0x15: |
199 | mb[0] = MBA_CMPLT_1_16BIT; | 201 | mb[0] = MBA_CMPLT_1_16BIT; |
200 | mb[1] = MSW(stat); | 202 | mb[1] = MSW(stat); |
201 | qla2x00_async_event(vha, mb); | 203 | qla2x00_async_event(vha, rsp, mb); |
202 | break; | 204 | break; |
203 | case 0x16: | 205 | case 0x16: |
204 | mb[0] = MBA_SCSI_COMPLETION; | 206 | mb[0] = MBA_SCSI_COMPLETION; |
205 | mb[1] = MSW(stat); | 207 | mb[1] = MSW(stat); |
206 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); | 208 | mb[2] = RD_MAILBOX_REG(ha, reg, 2); |
207 | qla2x00_async_event(vha, mb); | 209 | qla2x00_async_event(vha, rsp, mb); |
208 | break; | 210 | break; |
209 | default: | 211 | default: |
210 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " | 212 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " |
@@ -270,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
270 | * @mb: Mailbox registers (0 - 3) | 272 | * @mb: Mailbox registers (0 - 3) |
271 | */ | 273 | */ |
272 | void | 274 | void |
273 | qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb) | 275 | qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) |
274 | { | 276 | { |
275 | #define LS_UNKNOWN 2 | 277 | #define LS_UNKNOWN 2 |
276 | static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; | 278 | static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; |
@@ -344,7 +346,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb) | |||
344 | break; | 346 | break; |
345 | 347 | ||
346 | for (cnt = 0; cnt < handle_cnt; cnt++) | 348 | for (cnt = 0; cnt < handle_cnt; cnt++) |
347 | qla2x00_process_completed_request(vha, handles[cnt]); | 349 | qla2x00_process_completed_request(vha, rsp->req, |
350 | handles[cnt]); | ||
348 | break; | 351 | break; |
349 | 352 | ||
350 | case MBA_RESET: /* Reset */ | 353 | case MBA_RESET: /* Reset */ |
@@ -554,6 +557,10 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb) | |||
554 | break; | 557 | break; |
555 | 558 | ||
556 | case MBA_PORT_UPDATE: /* Port database update */ | 559 | case MBA_PORT_UPDATE: /* Port database update */ |
560 | /* Only handle SCNs for our Vport index. */ | ||
561 | if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) | ||
562 | break; | ||
563 | |||
557 | /* | 564 | /* |
558 | * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET | 565 | * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET |
559 | * event etc. earlier indicating loop is down) then process | 566 | * event etc. earlier indicating loop is down) then process |
@@ -641,9 +648,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb) | |||
641 | vha->host_no)); | 648 | vha->host_no)); |
642 | 649 | ||
643 | if (IS_FWI2_CAPABLE(ha)) | 650 | if (IS_FWI2_CAPABLE(ha)) |
644 | qla24xx_process_response_queue(vha); | 651 | qla24xx_process_response_queue(rsp); |
645 | else | 652 | else |
646 | qla2x00_process_response_queue(vha); | 653 | qla2x00_process_response_queue(rsp); |
647 | break; | 654 | break; |
648 | 655 | ||
649 | case MBA_DISCARD_RND_FRAME: | 656 | case MBA_DISCARD_RND_FRAME: |
@@ -694,15 +701,21 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb) | |||
694 | } | 701 | } |
695 | 702 | ||
696 | if (!vha->vp_idx && ha->num_vhosts) | 703 | if (!vha->vp_idx && ha->num_vhosts) |
697 | qla2x00_alert_all_vps(ha, mb); | 704 | qla2x00_alert_all_vps(rsp, mb); |
698 | } | 705 | } |
699 | 706 | ||
700 | static void | 707 | static void |
701 | qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) | 708 | qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) |
702 | { | 709 | { |
703 | fc_port_t *fcport = data; | 710 | fc_port_t *fcport = data; |
704 | struct qla_hw_data *ha = fcport->vha->hw; | 711 | struct scsi_qla_host *vha = fcport->vha; |
705 | if (ha->req->max_q_depth <= sdev->queue_depth) | 712 | struct qla_hw_data *ha = vha->hw; |
713 | struct req_que *req = NULL; | ||
714 | |||
715 | req = ha->req_q_map[vha->req_ques[0]]; | ||
716 | if (!req) | ||
717 | return; | ||
718 | if (req->max_q_depth <= sdev->queue_depth) | ||
706 | return; | 719 | return; |
707 | 720 | ||
708 | if (sdev->ordered_tags) | 721 | if (sdev->ordered_tags) |
@@ -735,14 +748,14 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data) | |||
735 | } | 748 | } |
736 | 749 | ||
737 | static inline void | 750 | static inline void |
738 | qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, srb_t *sp) | 751 | qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req, |
752 | srb_t *sp) | ||
739 | { | 753 | { |
740 | fc_port_t *fcport; | 754 | fc_port_t *fcport; |
741 | struct scsi_device *sdev; | 755 | struct scsi_device *sdev; |
742 | struct qla_hw_data *ha = vha->hw; | ||
743 | 756 | ||
744 | sdev = sp->cmd->device; | 757 | sdev = sp->cmd->device; |
745 | if (sdev->queue_depth >= ha->req->max_q_depth) | 758 | if (sdev->queue_depth >= req->max_q_depth) |
746 | return; | 759 | return; |
747 | 760 | ||
748 | fcport = sp->fcport; | 761 | fcport = sp->fcport; |
@@ -763,11 +776,11 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, srb_t *sp) | |||
763 | * @index: SRB index | 776 | * @index: SRB index |
764 | */ | 777 | */ |
765 | static void | 778 | static void |
766 | qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index) | 779 | qla2x00_process_completed_request(struct scsi_qla_host *vha, |
780 | struct req_que *req, uint32_t index) | ||
767 | { | 781 | { |
768 | srb_t *sp; | 782 | srb_t *sp; |
769 | struct qla_hw_data *ha = vha->hw; | 783 | struct qla_hw_data *ha = vha->hw; |
770 | struct req_que *req = ha->req; | ||
771 | 784 | ||
772 | /* Validate handle. */ | 785 | /* Validate handle. */ |
773 | if (index >= MAX_OUTSTANDING_COMMANDS) { | 786 | if (index >= MAX_OUTSTANDING_COMMANDS) { |
@@ -791,8 +804,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index) | |||
791 | /* Save ISP completion status */ | 804 | /* Save ISP completion status */ |
792 | sp->cmd->result = DID_OK << 16; | 805 | sp->cmd->result = DID_OK << 16; |
793 | 806 | ||
794 | qla2x00_ramp_up_queue_depth(vha, sp); | 807 | qla2x00_ramp_up_queue_depth(vha, req, sp); |
795 | qla2x00_sp_compl(vha, sp); | 808 | qla2x00_sp_compl(ha, sp); |
796 | } else { | 809 | } else { |
797 | DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", | 810 | DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", |
798 | vha->host_no)); | 811 | vha->host_no)); |
@@ -808,14 +821,16 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index) | |||
808 | * @ha: SCSI driver HA context | 821 | * @ha: SCSI driver HA context |
809 | */ | 822 | */ |
810 | void | 823 | void |
811 | qla2x00_process_response_queue(struct scsi_qla_host *vha) | 824 | qla2x00_process_response_queue(struct rsp_que *rsp) |
812 | { | 825 | { |
813 | struct qla_hw_data *ha = vha->hw; | 826 | struct scsi_qla_host *vha; |
827 | struct qla_hw_data *ha = rsp->hw; | ||
814 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 828 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
815 | sts_entry_t *pkt; | 829 | sts_entry_t *pkt; |
816 | uint16_t handle_cnt; | 830 | uint16_t handle_cnt; |
817 | uint16_t cnt; | 831 | uint16_t cnt; |
818 | struct rsp_que *rsp = ha->rsp; | 832 | |
833 | vha = qla2x00_get_rsp_host(rsp); | ||
819 | 834 | ||
820 | if (!vha->flags.online) | 835 | if (!vha->flags.online) |
821 | return; | 836 | return; |
@@ -835,7 +850,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *vha) | |||
835 | DEBUG3(printk(KERN_INFO | 850 | DEBUG3(printk(KERN_INFO |
836 | "scsi(%ld): Process error entry.\n", vha->host_no)); | 851 | "scsi(%ld): Process error entry.\n", vha->host_no)); |
837 | 852 | ||
838 | qla2x00_error_entry(vha, pkt); | 853 | qla2x00_error_entry(vha, rsp, pkt); |
839 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 854 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
840 | wmb(); | 855 | wmb(); |
841 | continue; | 856 | continue; |
@@ -843,19 +858,19 @@ qla2x00_process_response_queue(struct scsi_qla_host *vha) | |||
843 | 858 | ||
844 | switch (pkt->entry_type) { | 859 | switch (pkt->entry_type) { |
845 | case STATUS_TYPE: | 860 | case STATUS_TYPE: |
846 | qla2x00_status_entry(vha, pkt); | 861 | qla2x00_status_entry(vha, rsp, pkt); |
847 | break; | 862 | break; |
848 | case STATUS_TYPE_21: | 863 | case STATUS_TYPE_21: |
849 | handle_cnt = ((sts21_entry_t *)pkt)->handle_count; | 864 | handle_cnt = ((sts21_entry_t *)pkt)->handle_count; |
850 | for (cnt = 0; cnt < handle_cnt; cnt++) { | 865 | for (cnt = 0; cnt < handle_cnt; cnt++) { |
851 | qla2x00_process_completed_request(vha, | 866 | qla2x00_process_completed_request(vha, rsp->req, |
852 | ((sts21_entry_t *)pkt)->handle[cnt]); | 867 | ((sts21_entry_t *)pkt)->handle[cnt]); |
853 | } | 868 | } |
854 | break; | 869 | break; |
855 | case STATUS_TYPE_22: | 870 | case STATUS_TYPE_22: |
856 | handle_cnt = ((sts22_entry_t *)pkt)->handle_count; | 871 | handle_cnt = ((sts22_entry_t *)pkt)->handle_count; |
857 | for (cnt = 0; cnt < handle_cnt; cnt++) { | 872 | for (cnt = 0; cnt < handle_cnt; cnt++) { |
858 | qla2x00_process_completed_request(vha, | 873 | qla2x00_process_completed_request(vha, rsp->req, |
859 | ((sts22_entry_t *)pkt)->handle[cnt]); | 874 | ((sts22_entry_t *)pkt)->handle[cnt]); |
860 | } | 875 | } |
861 | break; | 876 | break; |
@@ -914,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) | |||
914 | * @pkt: Entry pointer | 929 | * @pkt: Entry pointer |
915 | */ | 930 | */ |
916 | static void | 931 | static void |
917 | qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt) | 932 | qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) |
918 | { | 933 | { |
919 | srb_t *sp; | 934 | srb_t *sp; |
920 | fc_port_t *fcport; | 935 | fc_port_t *fcport; |
@@ -928,7 +943,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt) | |||
928 | uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; | 943 | uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; |
929 | uint8_t *rsp_info, *sense_data; | 944 | uint8_t *rsp_info, *sense_data; |
930 | struct qla_hw_data *ha = vha->hw; | 945 | struct qla_hw_data *ha = vha->hw; |
931 | struct req_que *req = ha->req; | 946 | struct req_que *req = rsp->req; |
932 | 947 | ||
933 | sts = (sts_entry_t *) pkt; | 948 | sts = (sts_entry_t *) pkt; |
934 | sts24 = (struct sts_entry_24xx *) pkt; | 949 | sts24 = (struct sts_entry_24xx *) pkt; |
@@ -942,7 +957,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt) | |||
942 | 957 | ||
943 | /* Fast path completion. */ | 958 | /* Fast path completion. */ |
944 | if (comp_status == CS_COMPLETE && scsi_status == 0) { | 959 | if (comp_status == CS_COMPLETE && scsi_status == 0) { |
945 | qla2x00_process_completed_request(vha, sts->handle); | 960 | qla2x00_process_completed_request(vha, req, sts->handle); |
946 | 961 | ||
947 | return; | 962 | return; |
948 | } | 963 | } |
@@ -1012,7 +1027,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt) | |||
1012 | rsp_info[5], rsp_info[6], rsp_info[7])); | 1027 | rsp_info[5], rsp_info[6], rsp_info[7])); |
1013 | 1028 | ||
1014 | cp->result = DID_BUS_BUSY << 16; | 1029 | cp->result = DID_BUS_BUSY << 16; |
1015 | qla2x00_sp_compl(vha, sp); | 1030 | qla2x00_sp_compl(ha, sp); |
1016 | return; | 1031 | return; |
1017 | } | 1032 | } |
1018 | } | 1033 | } |
@@ -1276,7 +1291,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt) | |||
1276 | 1291 | ||
1277 | /* Place command on done queue. */ | 1292 | /* Place command on done queue. */ |
1278 | if (vha->status_srb == NULL) | 1293 | if (vha->status_srb == NULL) |
1279 | qla2x00_sp_compl(vha, sp); | 1294 | qla2x00_sp_compl(ha, sp); |
1280 | } | 1295 | } |
1281 | 1296 | ||
1282 | /** | 1297 | /** |
@@ -1325,7 +1340,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) | |||
1325 | /* Place command on done queue. */ | 1340 | /* Place command on done queue. */ |
1326 | if (sp->request_sense_length == 0) { | 1341 | if (sp->request_sense_length == 0) { |
1327 | vha->status_srb = NULL; | 1342 | vha->status_srb = NULL; |
1328 | qla2x00_sp_compl(vha, sp); | 1343 | qla2x00_sp_compl(ha, sp); |
1329 | } | 1344 | } |
1330 | } | 1345 | } |
1331 | } | 1346 | } |
@@ -1336,11 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) | |||
1336 | * @pkt: Entry pointer | 1351 | * @pkt: Entry pointer |
1337 | */ | 1352 | */ |
1338 | static void | 1353 | static void |
1339 | qla2x00_error_entry(scsi_qla_host_t *vha, sts_entry_t *pkt) | 1354 | qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) |
1340 | { | 1355 | { |
1341 | srb_t *sp; | 1356 | srb_t *sp; |
1342 | struct qla_hw_data *ha = vha->hw; | 1357 | struct qla_hw_data *ha = vha->hw; |
1343 | struct req_que *req = ha->req; | 1358 | struct req_que *req = rsp->req; |
1344 | #if defined(QL_DEBUG_LEVEL_2) | 1359 | #if defined(QL_DEBUG_LEVEL_2) |
1345 | if (pkt->entry_status & RF_INV_E_ORDER) | 1360 | if (pkt->entry_status & RF_INV_E_ORDER) |
1346 | qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); | 1361 | qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); |
@@ -1377,7 +1392,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, sts_entry_t *pkt) | |||
1377 | } else { | 1392 | } else { |
1378 | sp->cmd->result = DID_ERROR << 16; | 1393 | sp->cmd->result = DID_ERROR << 16; |
1379 | } | 1394 | } |
1380 | qla2x00_sp_compl(vha, sp); | 1395 | qla2x00_sp_compl(ha, sp); |
1381 | 1396 | ||
1382 | } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == | 1397 | } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == |
1383 | COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { | 1398 | COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { |
@@ -1428,12 +1443,14 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
1428 | * @ha: SCSI driver HA context | 1443 | * @ha: SCSI driver HA context |
1429 | */ | 1444 | */ |
1430 | void | 1445 | void |
1431 | qla24xx_process_response_queue(struct scsi_qla_host *vha) | 1446 | qla24xx_process_response_queue(struct rsp_que *rsp) |
1432 | { | 1447 | { |
1433 | struct qla_hw_data *ha = vha->hw; | 1448 | struct qla_hw_data *ha = rsp->hw; |
1434 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 1449 | device_reg_t __iomem *reg = ISP_QUE_REG(ha, rsp->id); |
1435 | struct sts_entry_24xx *pkt; | 1450 | struct sts_entry_24xx *pkt; |
1436 | struct rsp_que *rsp = ha->rsp; | 1451 | struct scsi_qla_host *vha; |
1452 | |||
1453 | vha = qla2x00_get_rsp_host(rsp); | ||
1437 | 1454 | ||
1438 | if (!vha->flags.online) | 1455 | if (!vha->flags.online) |
1439 | return; | 1456 | return; |
@@ -1453,7 +1470,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *vha) | |||
1453 | DEBUG3(printk(KERN_INFO | 1470 | DEBUG3(printk(KERN_INFO |
1454 | "scsi(%ld): Process error entry.\n", vha->host_no)); | 1471 | "scsi(%ld): Process error entry.\n", vha->host_no)); |
1455 | 1472 | ||
1456 | qla2x00_error_entry(vha, (sts_entry_t *) pkt); | 1473 | qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); |
1457 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; | 1474 | ((response_t *)pkt)->signature = RESPONSE_PROCESSED; |
1458 | wmb(); | 1475 | wmb(); |
1459 | continue; | 1476 | continue; |
@@ -1461,7 +1478,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *vha) | |||
1461 | 1478 | ||
1462 | switch (pkt->entry_type) { | 1479 | switch (pkt->entry_type) { |
1463 | case STATUS_TYPE: | 1480 | case STATUS_TYPE: |
1464 | qla2x00_status_entry(vha, pkt); | 1481 | qla2x00_status_entry(vha, rsp, pkt); |
1465 | break; | 1482 | break; |
1466 | case STATUS_CONT_TYPE: | 1483 | case STATUS_CONT_TYPE: |
1467 | qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); | 1484 | qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); |
@@ -1483,7 +1500,10 @@ qla24xx_process_response_queue(struct scsi_qla_host *vha) | |||
1483 | } | 1500 | } |
1484 | 1501 | ||
1485 | /* Adjust ring index */ | 1502 | /* Adjust ring index */ |
1486 | WRT_REG_DWORD(®->rsp_q_out, rsp->ring_index); | 1503 | if (ha->mqenable) |
1504 | WRT_REG_DWORD(®->isp25mq.rsp_q_out, rsp->ring_index); | ||
1505 | else | ||
1506 | WRT_REG_DWORD(®->isp24.rsp_q_out, rsp->ring_index); | ||
1487 | } | 1507 | } |
1488 | 1508 | ||
1489 | static void | 1509 | static void |
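In qla_isr.c the response-path entry points become queue-centric: handlers now take a struct rsp_que * and derive the HA context, the owning host, the paired request queue and the per-queue register window from it, instead of assuming the single ha->rsp. The overall shape, as a sketch (not part of the patch):

	/* Sketch only: the skeleton of a response-queue handler after this
	 * patch; compare with qla24xx_process_response_queue() above. */
	static void process_one_rsp_que(struct rsp_que *rsp)
	{
		struct qla_hw_data *ha = rsp->hw;
		struct scsi_qla_host *vha = qla2x00_get_rsp_host(rsp);
		struct req_que *req = rsp->req;		/* paired request queue */
		device_reg_t __iomem *reg = ISP_QUE_REG(ha, rsp->id);

		if (!vha->flags.online)
			return;
		/* ... walk rsp->ring_ptr, completing srbs that live on req,
		 * then write the new ring index through reg ... */
	}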
@@ -1607,10 +1627,11 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1607 | mb[1] = RD_REG_WORD(®->mailbox1); | 1627 | mb[1] = RD_REG_WORD(®->mailbox1); |
1608 | mb[2] = RD_REG_WORD(®->mailbox2); | 1628 | mb[2] = RD_REG_WORD(®->mailbox2); |
1609 | mb[3] = RD_REG_WORD(®->mailbox3); | 1629 | mb[3] = RD_REG_WORD(®->mailbox3); |
1610 | qla2x00_async_event(vha, mb); | 1630 | qla2x00_async_event(vha, rsp, mb); |
1611 | break; | 1631 | break; |
1612 | case 0x13: | 1632 | case 0x13: |
1613 | qla24xx_process_response_queue(vha); | 1633 | case 0x14: |
1634 | qla24xx_process_response_queue(rsp); | ||
1614 | break; | 1635 | break; |
1615 | default: | 1636 | default: |
1616 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " | 1637 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " |
@@ -1635,7 +1656,6 @@ qla24xx_intr_handler(int irq, void *dev_id) | |||
1635 | static irqreturn_t | 1656 | static irqreturn_t |
1636 | qla24xx_msix_rsp_q(int irq, void *dev_id) | 1657 | qla24xx_msix_rsp_q(int irq, void *dev_id) |
1637 | { | 1658 | { |
1638 | scsi_qla_host_t *vha; | ||
1639 | struct qla_hw_data *ha; | 1659 | struct qla_hw_data *ha; |
1640 | struct rsp_que *rsp; | 1660 | struct rsp_que *rsp; |
1641 | struct device_reg_24xx __iomem *reg; | 1661 | struct device_reg_24xx __iomem *reg; |
@@ -1651,8 +1671,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
1651 | 1671 | ||
1652 | spin_lock_irq(&ha->hardware_lock); | 1672 | spin_lock_irq(&ha->hardware_lock); |
1653 | 1673 | ||
1654 | vha = qla2x00_get_rsp_host(rsp); | 1674 | qla24xx_process_response_queue(rsp); |
1655 | qla24xx_process_response_queue(vha); | ||
1656 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | 1675 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); |
1657 | 1676 | ||
1658 | spin_unlock_irq(&ha->hardware_lock); | 1677 | spin_unlock_irq(&ha->hardware_lock); |
@@ -1661,6 +1680,41 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) | |||
1661 | } | 1680 | } |
1662 | 1681 | ||
1663 | static irqreturn_t | 1682 | static irqreturn_t |
1683 | qla25xx_msix_rsp_q(int irq, void *dev_id) | ||
1684 | { | ||
1685 | struct qla_hw_data *ha; | ||
1686 | struct rsp_que *rsp; | ||
1687 | struct device_reg_24xx __iomem *reg; | ||
1688 | uint16_t msix_disabled_hccr = 0; | ||
1689 | |||
1690 | rsp = (struct rsp_que *) dev_id; | ||
1691 | if (!rsp) { | ||
1692 | printk(KERN_INFO | ||
1693 | "%s(): NULL response queue pointer\n", __func__); | ||
1694 | return IRQ_NONE; | ||
1695 | } | ||
1696 | ha = rsp->hw; | ||
1697 | reg = &ha->iobase->isp24; | ||
1698 | |||
1699 | spin_lock_irq(&ha->hardware_lock); | ||
1700 | |||
1701 | msix_disabled_hccr = rsp->options; | ||
1702 | if (!rsp->id) | ||
1703 | msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22); | ||
1704 | else | ||
1705 | msix_disabled_hccr &= BIT_6; | ||
1706 | |||
1707 | qla24xx_process_response_queue(rsp); | ||
1708 | |||
1709 | if (!msix_disabled_hccr) | ||
1710 | WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); | ||
1711 | |||
1712 | spin_unlock_irq(&ha->hardware_lock); | ||
1713 | |||
1714 | return IRQ_HANDLED; | ||
1715 | } | ||
1716 | |||
1717 | static irqreturn_t | ||
1664 | qla24xx_msix_default(int irq, void *dev_id) | 1718 | qla24xx_msix_default(int irq, void *dev_id) |
1665 | { | 1719 | { |
1666 | scsi_qla_host_t *vha; | 1720 | scsi_qla_host_t *vha; |
@@ -1723,10 +1777,11 @@ qla24xx_msix_default(int irq, void *dev_id) | |||
1723 | mb[1] = RD_REG_WORD(®->mailbox1); | 1777 | mb[1] = RD_REG_WORD(®->mailbox1); |
1724 | mb[2] = RD_REG_WORD(®->mailbox2); | 1778 | mb[2] = RD_REG_WORD(®->mailbox2); |
1725 | mb[3] = RD_REG_WORD(®->mailbox3); | 1779 | mb[3] = RD_REG_WORD(®->mailbox3); |
1726 | qla2x00_async_event(vha, mb); | 1780 | qla2x00_async_event(vha, rsp, mb); |
1727 | break; | 1781 | break; |
1728 | case 0x13: | 1782 | case 0x13: |
1729 | qla24xx_process_response_queue(vha); | 1783 | case 0x14: |
1784 | qla24xx_process_response_queue(rsp); | ||
1730 | break; | 1785 | break; |
1731 | default: | 1786 | default: |
1732 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " | 1787 | DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " |
@@ -1756,12 +1811,25 @@ struct qla_init_msix_entry { | |||
1756 | irq_handler_t handler; | 1811 | irq_handler_t handler; |
1757 | }; | 1812 | }; |
1758 | 1813 | ||
1759 | static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { | 1814 | static struct qla_init_msix_entry base_queue = { |
1760 | { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT, | 1815 | .entry = 0, |
1761 | "qla2xxx (default)", qla24xx_msix_default }, | 1816 | .index = 0, |
1817 | .name = "qla2xxx (default)", | ||
1818 | .handler = qla24xx_msix_default, | ||
1819 | }; | ||
1820 | |||
1821 | static struct qla_init_msix_entry base_rsp_queue = { | ||
1822 | .entry = 1, | ||
1823 | .index = 1, | ||
1824 | .name = "qla2xxx (rsp_q)", | ||
1825 | .handler = qla24xx_msix_rsp_q, | ||
1826 | }; | ||
1762 | 1827 | ||
1763 | { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q, | 1828 | static struct qla_init_msix_entry multi_rsp_queue = { |
1764 | "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, | 1829 | .entry = 1, |
1830 | .index = 1, | ||
1831 | .name = "qla2xxx (multi_q)", | ||
1832 | .handler = qla25xx_msix_rsp_q, | ||
1765 | }; | 1833 | }; |
1766 | 1834 | ||
1767 | static void | 1835 | static void |
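The single imsix_entries[] table gives way to three named entries; qla25xx_msix_rsp_q() (added above) is the per-queue handler that only clears the RISC interrupt when hand-shaking has not been disabled for that queue. Which entry ends up on vector 1 is decided in the qla24xx_enable_msix() hunk below, and reduces to the following sketch (illustrative, using only fields introduced by this patch):

	/* Sketch only: vector 0 always runs qla24xx_msix_default (mailbox
	 * events/AENs); vector 1 runs the multi-queue handler only when more
	 * than one queue resource and the MQ register window are available. */
	static struct qla_init_msix_entry *pick_vector1(struct qla_hw_data *ha)
	{
		if (ha->max_queues > 1 && ha->mqiobase)
			return &multi_rsp_queue;	/* qla25xx_msix_rsp_q */
		return &base_rsp_queue;			/* qla24xx_msix_rsp_q */
	}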
@@ -1769,63 +1837,115 @@ qla24xx_disable_msix(struct qla_hw_data *ha) | |||
1769 | { | 1837 | { |
1770 | int i; | 1838 | int i; |
1771 | struct qla_msix_entry *qentry; | 1839 | struct qla_msix_entry *qentry; |
1772 | struct rsp_que *rsp = ha->rsp; | ||
1773 | 1840 | ||
1774 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) { | 1841 | for (i = 0; i < ha->msix_count; i++) { |
1775 | qentry = &ha->msix_entries[imsix_entries[i].index]; | 1842 | qentry = &ha->msix_entries[i]; |
1776 | if (qentry->have_irq) | 1843 | if (qentry->have_irq) |
1777 | free_irq(qentry->msix_vector, rsp); | 1844 | free_irq(qentry->vector, qentry->rsp); |
1778 | } | 1845 | } |
1779 | pci_disable_msix(ha->pdev); | 1846 | pci_disable_msix(ha->pdev); |
1847 | kfree(ha->msix_entries); | ||
1848 | ha->msix_entries = NULL; | ||
1849 | ha->flags.msix_enabled = 0; | ||
1780 | } | 1850 | } |
1781 | 1851 | ||
1782 | static int | 1852 | static int |
1783 | qla24xx_enable_msix(struct qla_hw_data *ha) | 1853 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
1784 | { | 1854 | { |
1785 | int i, ret; | 1855 | int i, ret; |
1786 | struct rsp_que *rsp = ha->rsp; | 1856 | struct msix_entry *entries; |
1787 | struct msix_entry entries[QLA_MSIX_ENTRIES]; | ||
1788 | struct qla_msix_entry *qentry; | 1857 | struct qla_msix_entry *qentry; |
1858 | struct qla_init_msix_entry *msix_queue; | ||
1859 | |||
1860 | entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, | ||
1861 | GFP_KERNEL); | ||
1862 | if (!entries) | ||
1863 | return -ENOMEM; | ||
1789 | 1864 | ||
1790 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) | 1865 | for (i = 0; i < ha->msix_count; i++) |
1791 | entries[i].entry = imsix_entries[i].entry; | 1866 | entries[i].entry = i; |
1792 | 1867 | ||
1793 | ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); | 1868 | ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); |
1794 | if (ret) { | 1869 | if (ret) { |
1795 | qla_printk(KERN_WARNING, ha, | 1870 | qla_printk(KERN_WARNING, ha, |
1796 | "MSI-X: Failed to enable support -- %d/%d\n", | 1871 | "MSI-X: Failed to enable support -- %d/%d\n" |
1797 | QLA_MSIX_ENTRIES, ret); | 1872 | " Retry with %d vectors\n", ha->msix_count, ret, ret); |
1873 | ha->msix_count = ret; | ||
1874 | ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); | ||
1875 | if (ret) { | ||
1876 | qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" | ||
1877 | " support, giving up -- %d/%d\n", | ||
1878 | ha->msix_count, ret); | ||
1879 | goto msix_out; | ||
1880 | } | ||
1881 | ha->max_queues = ha->msix_count - 1; | ||
1882 | } | ||
1883 | ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * | ||
1884 | ha->msix_count, GFP_KERNEL); | ||
1885 | if (!ha->msix_entries) { | ||
1886 | ret = -ENOMEM; | ||
1798 | goto msix_out; | 1887 | goto msix_out; |
1799 | } | 1888 | } |
1800 | ha->flags.msix_enabled = 1; | 1889 | ha->flags.msix_enabled = 1; |
1801 | 1890 | ||
1802 | for (i = 0; i < QLA_MSIX_ENTRIES; i++) { | 1891 | for (i = 0; i < ha->msix_count; i++) { |
1803 | qentry = &ha->msix_entries[imsix_entries[i].index]; | 1892 | qentry = &ha->msix_entries[i]; |
1804 | qentry->msix_vector = entries[i].vector; | 1893 | qentry->vector = entries[i].vector; |
1805 | qentry->msix_entry = entries[i].entry; | 1894 | qentry->entry = entries[i].entry; |
1806 | qentry->have_irq = 0; | 1895 | qentry->have_irq = 0; |
1807 | ret = request_irq(qentry->msix_vector, | 1896 | qentry->rsp = NULL; |
1808 | imsix_entries[i].handler, 0, imsix_entries[i].name, rsp); | ||
1809 | if (ret) { | ||
1810 | qla_printk(KERN_WARNING, ha, | ||
1811 | "MSI-X: Unable to register handler -- %x/%d.\n", | ||
1812 | imsix_entries[i].index, ret); | ||
1813 | qla24xx_disable_msix(ha); | ||
1814 | goto msix_out; | ||
1815 | } | ||
1816 | qentry->have_irq = 1; | ||
1817 | } | 1897 | } |
1818 | 1898 | ||
1899 | /* Enable MSI-X for AENs for queue 0 */ | ||
1900 | qentry = &ha->msix_entries[0]; | ||
1901 | ret = request_irq(qentry->vector, base_queue.handler, 0, | ||
1902 | base_queue.name, rsp); | ||
1903 | if (ret) { | ||
1904 | qla_printk(KERN_WARNING, ha, | ||
1905 | "MSI-X: Unable to register handler -- %x/%d.\n", | ||
1906 | qentry->vector, ret); | ||
1907 | qla24xx_disable_msix(ha); | ||
1908 | goto msix_out; | ||
1909 | } | ||
1910 | qentry->have_irq = 1; | ||
1911 | qentry->rsp = rsp; | ||
1912 | |||
1913 | /* Enable MSI-X vector for response queue update for queue 0 */ | ||
1914 | if (ha->max_queues > 1 && ha->mqiobase) { | ||
1915 | ha->mqenable = 1; | ||
1916 | msix_queue = &multi_rsp_queue; | ||
1917 | qla_printk(KERN_INFO, ha, | ||
1918 | "MQ enabled, Number of Queue Resources: %d \n", | ||
1919 | ha->max_queues); | ||
1920 | } else { | ||
1921 | ha->mqenable = 0; | ||
1922 | msix_queue = &base_rsp_queue; | ||
1923 | } | ||
1924 | |||
1925 | qentry = &ha->msix_entries[1]; | ||
1926 | ret = request_irq(qentry->vector, msix_queue->handler, 0, | ||
1927 | msix_queue->name, rsp); | ||
1928 | if (ret) { | ||
1929 | qla_printk(KERN_WARNING, ha, | ||
1930 | "MSI-X: Unable to register handler -- %x/%d.\n", | ||
1931 | qentry->vector, ret); | ||
1932 | qla24xx_disable_msix(ha); | ||
1933 | ha->mqenable = 0; | ||
1934 | goto msix_out; | ||
1935 | } | ||
1936 | qentry->have_irq = 1; | ||
1937 | qentry->rsp = rsp; | ||
1938 | |||
1819 | msix_out: | 1939 | msix_out: |
1940 | kfree(entries); | ||
1820 | return ret; | 1941 | return ret; |
1821 | } | 1942 | } |
1822 | 1943 | ||
1823 | int | 1944 | int |
1824 | qla2x00_request_irqs(struct qla_hw_data *ha) | 1945 | qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) |
1825 | { | 1946 | { |
1826 | int ret; | 1947 | int ret; |
1827 | device_reg_t __iomem *reg = ha->iobase; | 1948 | device_reg_t __iomem *reg = ha->iobase; |
1828 | struct rsp_que *rsp = ha->rsp; | ||
1829 | 1949 | ||
1830 | /* If possible, enable MSI-X. */ | 1950 | /* If possible, enable MSI-X. */ |
1831 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) | 1951 | if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) |
@@ -1852,7 +1972,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha) | |||
1852 | goto skip_msi; | 1972 | goto skip_msi; |
1853 | } | 1973 | } |
1854 | 1974 | ||
1855 | ret = qla24xx_enable_msix(ha); | 1975 | ret = qla24xx_enable_msix(ha, rsp); |
1856 | if (!ret) { | 1976 | if (!ret) { |
1857 | DEBUG2(qla_printk(KERN_INFO, ha, | 1977 | DEBUG2(qla_printk(KERN_INFO, ha, |
1858 | "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, | 1978 | "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, |
@@ -1903,7 +2023,7 @@ void | |||
1903 | qla2x00_free_irqs(scsi_qla_host_t *vha) | 2023 | qla2x00_free_irqs(scsi_qla_host_t *vha) |
1904 | { | 2024 | { |
1905 | struct qla_hw_data *ha = vha->hw; | 2025 | struct qla_hw_data *ha = vha->hw; |
1906 | struct rsp_que *rsp = ha->rsp; | 2026 | struct rsp_que *rsp = ha->rsp_q_map[0]; |
1907 | 2027 | ||
1908 | if (ha->flags.msix_enabled) | 2028 | if (ha->flags.msix_enabled) |
1909 | qla24xx_disable_msix(ha); | 2029 | qla24xx_disable_msix(ha); |
@@ -1919,16 +2039,41 @@ qla2x00_get_rsp_host(struct rsp_que *rsp) | |||
1919 | srb_t *sp; | 2039 | srb_t *sp; |
1920 | struct qla_hw_data *ha = rsp->hw; | 2040 | struct qla_hw_data *ha = rsp->hw; |
1921 | struct scsi_qla_host *vha = NULL; | 2041 | struct scsi_qla_host *vha = NULL; |
1922 | struct sts_entry_24xx *pkt = (struct sts_entry_24xx *) rsp->ring_ptr; | 2042 | struct sts_entry_24xx *pkt; |
1923 | 2043 | struct req_que *req; | |
1924 | if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { | 2044 | |
1925 | sp = ha->req->outstanding_cmds[pkt->handle]; | 2045 | if (rsp->id) { |
1926 | if (sp) | 2046 | pkt = (struct sts_entry_24xx *) rsp->ring_ptr; |
1927 | vha = sp->vha; | 2047 | req = rsp->req; |
2048 | if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { | ||
2049 | sp = req->outstanding_cmds[pkt->handle]; | ||
2050 | if (sp) | ||
2051 | vha = sp->vha; | ||
2052 | } | ||
1928 | } | 2053 | } |
1929 | if (!vha) | 2054 | if (!vha) |
1930 | /* Invalid entry, handle it in base queue */ | 2055 | /* handle it in base queue */ |
1931 | vha = pci_get_drvdata(ha->pdev); | 2056 | vha = pci_get_drvdata(ha->pdev); |
1932 | 2057 | ||
1933 | return vha; | 2058 | return vha; |
1934 | } | 2059 | } |
2060 | |||
2061 | int qla25xx_request_irq(struct rsp_que *rsp) | ||
2062 | { | ||
2063 | struct qla_hw_data *ha = rsp->hw; | ||
2064 | struct qla_init_msix_entry *intr = &multi_rsp_queue; | ||
2065 | struct qla_msix_entry *msix = rsp->msix; | ||
2066 | int ret; | ||
2067 | |||
2068 | ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); | ||
2069 | if (ret) { | ||
2070 | qla_printk(KERN_WARNING, ha, | ||
2071 | "MSI-X: Unable to register handler -- %x/%d.\n", | ||
2072 | msix->vector, ret); | ||
2073 | return ret; | ||
2074 | } | ||
2075 | msix->have_irq = 1; | ||
2076 | msix->rsp = rsp; | ||
2077 | return ret; | ||
2078 | } | ||
2079 | |||
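
Note on the MSI-X setup above: it relies on the older pci_enable_msix() convention of returning a positive count when fewer vectors are available than requested; the driver then retries with that smaller count, keeps vector 0 for the default/AEN handler, and treats the remaining vectors as usable response queues (max_queues). The following is a minimal user-space sketch of that negotiation only, with a stubbed try_enable_msix() standing in for pci_enable_msix() (names and the "5 available vectors" figure are hypothetical):

#include <stdio.h>

/* Stand-in for pci_enable_msix(): 0 on success, a positive number when
 * only that many vectors are available, negative errno on hard failure.
 * Here we pretend the device exposes 5 vectors. */
static int try_enable_msix(int requested)
{
        const int available = 5;
        if (requested <= available)
                return 0;
        return available;
}

int main(void)
{
        int msix_count = 8;     /* what the driver asked for */
        int max_queues;
        int ret;

        ret = try_enable_msix(msix_count);
        if (ret > 0) {
                /* Not enough vectors: retry with what the device offers. */
                printf("MSI-X: wanted %d, retrying with %d\n", msix_count, ret);
                msix_count = ret;
                ret = try_enable_msix(msix_count);
        }
        if (ret) {
                printf("MSI-X: giving up (%d)\n", ret);
                return 1;
        }

        /* Vector 0 handles mailbox/AEN events; the rest drive rsp queues. */
        max_queues = msix_count - 1;
        printf("MSI-X: %d vectors, %d usable response queues\n",
               msix_count, max_queues);
        return 0;
}
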
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bbe7181fb3d5..c54bc977c7b8 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -153,7 +153,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
153 | break; | 153 | break; |
154 | 154 | ||
155 | /* Check for pending interrupts. */ | 155 | /* Check for pending interrupts. */ |
156 | qla2x00_poll(ha->rsp); | 156 | qla2x00_poll(ha->rsp_q_map[0]); |
157 | 157 | ||
158 | if (command != MBC_LOAD_RISC_RAM_EXTENDED && | 158 | if (command != MBC_LOAD_RISC_RAM_EXTENDED && |
159 | !ha->flags.mbox_int) | 159 | !ha->flags.mbox_int) |
@@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
223 | "interrupt.\n", __func__, base_vha->host_no)); | 223 | "interrupt.\n", __func__, base_vha->host_no)); |
224 | 224 | ||
225 | /* polling mode for non isp_abort commands. */ | 225 | /* polling mode for non isp_abort commands. */ |
226 | qla2x00_poll(ha->rsp); | 226 | qla2x00_poll(ha->rsp_q_map[0]); |
227 | } | 227 | } |
228 | 228 | ||
229 | if (rval == QLA_FUNCTION_TIMEOUT && | 229 | if (rval == QLA_FUNCTION_TIMEOUT && |
@@ -713,8 +713,6 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, | |||
713 | /*EMPTY*/ | 713 | /*EMPTY*/ |
714 | DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", | 714 | DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", |
715 | vha->host_no, rval)); | 715 | vha->host_no, rval)); |
716 | DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", | ||
717 | vha->host_no, rval)); | ||
718 | } else { | 716 | } else { |
719 | sts_entry_t *sts_entry = (sts_entry_t *) buffer; | 717 | sts_entry_t *sts_entry = (sts_entry_t *) buffer; |
720 | 718 | ||
@@ -749,16 +747,15 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, | |||
749 | * Kernel context. | 747 | * Kernel context. |
750 | */ | 748 | */ |
751 | int | 749 | int |
752 | qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp) | 750 | qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) |
753 | { | 751 | { |
754 | unsigned long flags = 0; | 752 | unsigned long flags = 0; |
755 | fc_port_t *fcport; | 753 | fc_port_t *fcport; |
756 | int rval; | 754 | int rval; |
757 | uint32_t handle; | 755 | uint32_t handle = 0; |
758 | mbx_cmd_t mc; | 756 | mbx_cmd_t mc; |
759 | mbx_cmd_t *mcp = &mc; | 757 | mbx_cmd_t *mcp = &mc; |
760 | struct qla_hw_data *ha = vha->hw; | 758 | struct qla_hw_data *ha = vha->hw; |
761 | struct req_que *req = ha->req; | ||
762 | 759 | ||
763 | DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); | 760 | DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); |
764 | 761 | ||
@@ -808,11 +805,15 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l) | |||
808 | mbx_cmd_t mc; | 805 | mbx_cmd_t mc; |
809 | mbx_cmd_t *mcp = &mc; | 806 | mbx_cmd_t *mcp = &mc; |
810 | scsi_qla_host_t *vha; | 807 | scsi_qla_host_t *vha; |
808 | struct req_que *req; | ||
809 | struct rsp_que *rsp; | ||
811 | 810 | ||
812 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); | 811 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); |
813 | 812 | ||
814 | l = l; | 813 | l = l; |
815 | vha = fcport->vha; | 814 | vha = fcport->vha; |
815 | req = vha->hw->req_q_map[0]; | ||
816 | rsp = vha->hw->rsp_q_map[0]; | ||
816 | mcp->mb[0] = MBC_ABORT_TARGET; | 817 | mcp->mb[0] = MBC_ABORT_TARGET; |
817 | mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; | 818 | mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; |
818 | if (HAS_EXTENDED_IDS(vha->hw)) { | 819 | if (HAS_EXTENDED_IDS(vha->hw)) { |
@@ -835,7 +836,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l) | |||
835 | } | 836 | } |
836 | 837 | ||
837 | /* Issue marker IOCB. */ | 838 | /* Issue marker IOCB. */ |
838 | rval2 = qla2x00_marker(vha, fcport->loop_id, 0, MK_SYNC_ID); | 839 | rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, |
840 | MK_SYNC_ID); | ||
839 | if (rval2 != QLA_SUCCESS) { | 841 | if (rval2 != QLA_SUCCESS) { |
840 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " | 842 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " |
841 | "(%x).\n", __func__, vha->host_no, rval2)); | 843 | "(%x).\n", __func__, vha->host_no, rval2)); |
@@ -853,10 +855,14 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) | |||
853 | mbx_cmd_t mc; | 855 | mbx_cmd_t mc; |
854 | mbx_cmd_t *mcp = &mc; | 856 | mbx_cmd_t *mcp = &mc; |
855 | scsi_qla_host_t *vha; | 857 | scsi_qla_host_t *vha; |
858 | struct req_que *req; | ||
859 | struct rsp_que *rsp; | ||
856 | 860 | ||
857 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); | 861 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); |
858 | 862 | ||
859 | vha = fcport->vha; | 863 | vha = fcport->vha; |
864 | req = vha->hw->req_q_map[0]; | ||
865 | rsp = vha->hw->rsp_q_map[0]; | ||
860 | mcp->mb[0] = MBC_LUN_RESET; | 866 | mcp->mb[0] = MBC_LUN_RESET; |
861 | mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; | 867 | mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; |
862 | if (HAS_EXTENDED_IDS(vha->hw)) | 868 | if (HAS_EXTENDED_IDS(vha->hw)) |
@@ -877,7 +883,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) | |||
877 | } | 883 | } |
878 | 884 | ||
879 | /* Issue marker IOCB. */ | 885 | /* Issue marker IOCB. */ |
880 | rval2 = qla2x00_marker(vha, fcport->loop_id, l, MK_SYNC_ID_LUN); | 886 | rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, |
887 | MK_SYNC_ID_LUN); | ||
881 | if (rval2 != QLA_SUCCESS) { | 888 | if (rval2 != QLA_SUCCESS) { |
882 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " | 889 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " |
883 | "(%x).\n", __func__, vha->host_no, rval2)); | 890 | "(%x).\n", __func__, vha->host_no, rval2)); |
@@ -1743,6 +1750,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1743 | lg->port_id[1] = area; | 1750 | lg->port_id[1] = area; |
1744 | lg->port_id[2] = domain; | 1751 | lg->port_id[2] = domain; |
1745 | lg->vp_index = vha->vp_idx; | 1752 | lg->vp_index = vha->vp_idx; |
1753 | |||
1746 | rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); | 1754 | rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); |
1747 | if (rval != QLA_SUCCESS) { | 1755 | if (rval != QLA_SUCCESS) { |
1748 | DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " | 1756 | DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " |
@@ -1753,9 +1761,9 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, | |||
1753 | lg->entry_status)); | 1761 | lg->entry_status)); |
1754 | rval = QLA_FUNCTION_FAILED; | 1762 | rval = QLA_FUNCTION_FAILED; |
1755 | } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { | 1763 | } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { |
1756 | DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " | 1764 | DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB " |
1757 | "-- completion status (%x) ioparam=%x/%x.\n", __func__, | 1765 | "-- completion status (%x) ioparam=%x/%x.\n", __func__, |
1758 | vha->host_no, le16_to_cpu(lg->comp_status), | 1766 | vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status), |
1759 | le32_to_cpu(lg->io_parameter[0]), | 1767 | le32_to_cpu(lg->io_parameter[0]), |
1760 | le32_to_cpu(lg->io_parameter[1]))); | 1768 | le32_to_cpu(lg->io_parameter[1]))); |
1761 | } else { | 1769 | } else { |
@@ -2173,7 +2181,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, | |||
2173 | } | 2181 | } |
2174 | 2182 | ||
2175 | int | 2183 | int |
2176 | qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp) | 2184 | qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) |
2177 | { | 2185 | { |
2178 | int rval; | 2186 | int rval; |
2179 | fc_port_t *fcport; | 2187 | fc_port_t *fcport; |
@@ -2183,7 +2191,6 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp) | |||
2183 | dma_addr_t abt_dma; | 2191 | dma_addr_t abt_dma; |
2184 | uint32_t handle; | 2192 | uint32_t handle; |
2185 | struct qla_hw_data *ha = vha->hw; | 2193 | struct qla_hw_data *ha = vha->hw; |
2186 | struct req_que *req = ha->req; | ||
2187 | 2194 | ||
2188 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); | 2195 | DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); |
2189 | 2196 | ||
@@ -2216,6 +2223,9 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp) | |||
2216 | abt->port_id[1] = fcport->d_id.b.area; | 2223 | abt->port_id[1] = fcport->d_id.b.area; |
2217 | abt->port_id[2] = fcport->d_id.b.domain; | 2224 | abt->port_id[2] = fcport->d_id.b.domain; |
2218 | abt->vp_index = fcport->vp_idx; | 2225 | abt->vp_index = fcport->vp_idx; |
2226 | |||
2227 | abt->req_que_no = cpu_to_le16(req->id); | ||
2228 | |||
2219 | rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); | 2229 | rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); |
2220 | if (rval != QLA_SUCCESS) { | 2230 | if (rval != QLA_SUCCESS) { |
2221 | DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", | 2231 | DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", |
@@ -2255,11 +2265,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
2255 | dma_addr_t tsk_dma; | 2265 | dma_addr_t tsk_dma; |
2256 | scsi_qla_host_t *vha; | 2266 | scsi_qla_host_t *vha; |
2257 | struct qla_hw_data *ha; | 2267 | struct qla_hw_data *ha; |
2268 | struct req_que *req; | ||
2269 | struct rsp_que *rsp; | ||
2258 | 2270 | ||
2259 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); | 2271 | DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); |
2260 | 2272 | ||
2261 | vha = fcport->vha; | 2273 | vha = fcport->vha; |
2262 | ha = vha->hw; | 2274 | ha = vha->hw; |
2275 | req = ha->req_q_map[0]; | ||
2276 | rsp = ha->rsp_q_map[0]; | ||
2263 | tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); | 2277 | tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); |
2264 | if (tsk == NULL) { | 2278 | if (tsk == NULL) { |
2265 | DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " | 2279 | DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " |
@@ -2301,7 +2315,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, | |||
2301 | } | 2315 | } |
2302 | 2316 | ||
2303 | /* Issue marker IOCB. */ | 2317 | /* Issue marker IOCB. */ |
2304 | rval2 = qla2x00_marker(vha, fcport->loop_id, l, | 2318 | rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, |
2305 | type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); | 2319 | type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); |
2306 | if (rval2 != QLA_SUCCESS) { | 2320 | if (rval2 != QLA_SUCCESS) { |
2307 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " | 2321 | DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " |
@@ -3069,3 +3083,108 @@ verify_done: | |||
3069 | 3083 | ||
3070 | return rval; | 3084 | return rval; |
3071 | } | 3085 | } |
3086 | |||
3087 | int | ||
3088 | qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req, | ||
3089 | uint8_t options) | ||
3090 | { | ||
3091 | int rval; | ||
3092 | unsigned long flags; | ||
3093 | mbx_cmd_t mc; | ||
3094 | mbx_cmd_t *mcp = &mc; | ||
3095 | struct device_reg_25xxmq __iomem *reg; | ||
3096 | struct qla_hw_data *ha = vha->hw; | ||
3097 | |||
3098 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | ||
3099 | mcp->mb[1] = options; | ||
3100 | mcp->mb[2] = MSW(LSD(req->dma)); | ||
3101 | mcp->mb[3] = LSW(LSD(req->dma)); | ||
3102 | mcp->mb[6] = MSW(MSD(req->dma)); | ||
3103 | mcp->mb[7] = LSW(MSD(req->dma)); | ||
3104 | mcp->mb[5] = req->length; | ||
3105 | if (req->rsp) | ||
3106 | mcp->mb[10] = req->rsp->id; | ||
3107 | mcp->mb[12] = req->qos; | ||
3108 | mcp->mb[11] = req->vp_idx; | ||
3109 | mcp->mb[13] = req->rid; | ||
3110 | |||
3111 | reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + | ||
3112 | QLA_QUE_PAGE * req->id); | ||
3113 | |||
3114 | mcp->mb[4] = req->id; | ||
3115 | /* que in ptr index */ | ||
3116 | mcp->mb[8] = 0; | ||
3117 | /* que out ptr index */ | ||
3118 | mcp->mb[9] = 0; | ||
3119 | mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| | ||
3120 | MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3121 | mcp->in_mb = MBX_0; | ||
3122 | mcp->flags = MBX_DMA_OUT; | ||
3123 | mcp->tov = 60; | ||
3124 | |||
3125 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3126 | if (!(options & BIT_0)) { | ||
3127 | WRT_REG_DWORD(®->req_q_in, 0); | ||
3128 | WRT_REG_DWORD(®->req_q_out, 0); | ||
3129 | } | ||
3130 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3131 | |||
3132 | rval = (int)qla2x00_mailbox_command(vha, mcp); | ||
3133 | if (rval != QLA_SUCCESS) | ||
3134 | DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n", | ||
3135 | __func__, vha->host_no, rval, mcp->mb[0])); | ||
3136 | return rval; | ||
3137 | } | ||
3138 | |||
3139 | int | ||
3140 | qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp, | ||
3141 | uint8_t options) | ||
3142 | { | ||
3143 | int rval; | ||
3144 | unsigned long flags; | ||
3145 | mbx_cmd_t mc; | ||
3146 | mbx_cmd_t *mcp = &mc; | ||
3147 | struct device_reg_25xxmq __iomem *reg; | ||
3148 | struct qla_hw_data *ha = vha->hw; | ||
3149 | |||
3150 | mcp->mb[0] = MBC_INITIALIZE_MULTIQ; | ||
3151 | mcp->mb[1] = options; | ||
3152 | mcp->mb[2] = MSW(LSD(rsp->dma)); | ||
3153 | mcp->mb[3] = LSW(LSD(rsp->dma)); | ||
3154 | mcp->mb[6] = MSW(MSD(rsp->dma)); | ||
3155 | mcp->mb[7] = LSW(MSD(rsp->dma)); | ||
3156 | mcp->mb[5] = rsp->length; | ||
3157 | mcp->mb[11] = rsp->vp_idx; | ||
3158 | mcp->mb[14] = rsp->msix->vector; | ||
3159 | mcp->mb[13] = rsp->rid; | ||
3160 | |||
3161 | reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + | ||
3162 | QLA_QUE_PAGE * rsp->id); | ||
3163 | |||
3164 | mcp->mb[4] = rsp->id; | ||
3165 | /* que in ptr index */ | ||
3166 | mcp->mb[8] = 0; | ||
3167 | /* que out ptr index */ | ||
3168 | mcp->mb[9] = 0; | ||
3169 | mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 | ||
3170 | |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3171 | mcp->in_mb = MBX_0; | ||
3172 | mcp->flags = MBX_DMA_OUT; | ||
3173 | mcp->tov = 60; | ||
3174 | |||
3175 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
3176 | if (!(options & BIT_0)) { | ||
3177 | WRT_REG_DWORD(®->rsp_q_out, 0); | ||
3178 | WRT_REG_DWORD(®->rsp_q_in, 0); | ||
3179 | } | ||
3180 | |||
3181 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
3182 | |||
3183 | rval = (int)qla2x00_mailbox_command(vha, mcp); | ||
3184 | if (rval != QLA_SUCCESS) | ||
3185 | DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x " | ||
3186 | "mb0=%x.\n", __func__, | ||
3187 | vha->host_no, rval, mcp->mb[0])); | ||
3188 | return rval; | ||
3189 | } | ||
3190 | |||
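
The two MBC_INITIALIZE_MULTIQ helpers above pack the 64-bit ring DMA address into four 16-bit mailbox registers: mb[3]/mb[2] carry the low dword and mb[7]/mb[6] the high dword, via the driver's LSD/MSD/LSW/MSW helpers. A self-contained sketch of that packing and the inverse reassembly, using plain C macros with the assumed semantics of the driver's versions:

#include <stdint.h>
#include <stdio.h>

/* Equivalents of the driver's helpers (assumed semantics). */
#define LSD(x)  ((uint32_t)((uint64_t)(x) & 0xffffffff))
#define MSD(x)  ((uint32_t)((uint64_t)(x) >> 32))
#define LSW(x)  ((uint16_t)((uint32_t)(x) & 0xffff))
#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))

int main(void)
{
        uint64_t dma = 0x0000123487654321ULL;   /* example ring address */
        uint16_t mb[8] = { 0 };

        /* Same register layout as qla25xx_init_req_que()/_rsp_que(). */
        mb[2] = MSW(LSD(dma));
        mb[3] = LSW(LSD(dma));
        mb[6] = MSW(MSD(dma));
        mb[7] = LSW(MSD(dma));

        /* Firmware-side view: rebuild the 64-bit address. */
        uint64_t rebuilt = ((uint64_t)mb[6] << 48) | ((uint64_t)mb[7] << 32) |
                           ((uint64_t)mb[2] << 16) | mb[3];

        printf("dma=0x%016llx rebuilt=0x%016llx\n",
               (unsigned long long)dma, (unsigned long long)rebuilt);
        return rebuilt != dma;
}
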
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index da3db3abb82c..386ffeae5b5a 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -101,6 +101,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) | |||
101 | "loop_id=0x%04x :%x\n", | 101 | "loop_id=0x%04x :%x\n", |
102 | vha->host_no, fcport->loop_id, fcport->vp_idx)); | 102 | vha->host_no, fcport->loop_id, fcport->vp_idx)); |
103 | 103 | ||
104 | atomic_set(&fcport->state, FCS_DEVICE_DEAD); | ||
104 | qla2x00_mark_device_lost(vha, fcport, 0, 0); | 105 | qla2x00_mark_device_lost(vha, fcport, 0, 0); |
105 | atomic_set(&fcport->state, FCS_UNCONFIGURED); | 106 | atomic_set(&fcport->state, FCS_UNCONFIGURED); |
106 | } | 107 | } |
@@ -191,9 +192,10 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) | |||
191 | } | 192 | } |
192 | 193 | ||
193 | void | 194 | void |
194 | qla2x00_alert_all_vps(struct qla_hw_data *ha, uint16_t *mb) | 195 | qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) |
195 | { | 196 | { |
196 | scsi_qla_host_t *vha; | 197 | scsi_qla_host_t *vha; |
198 | struct qla_hw_data *ha = rsp->hw; | ||
197 | int i = 0; | 199 | int i = 0; |
198 | 200 | ||
199 | list_for_each_entry(vha, &ha->vp_list, list) { | 201 | list_for_each_entry(vha, &ha->vp_list, list) { |
@@ -210,7 +212,7 @@ qla2x00_alert_all_vps(struct qla_hw_data *ha, uint16_t *mb) | |||
210 | DEBUG15(printk("scsi(%ld)%s: Async_event for" | 212 | DEBUG15(printk("scsi(%ld)%s: Async_event for" |
211 | " VP[%d], mb = 0x%x, vha=%p\n", | 213 | " VP[%d], mb = 0x%x, vha=%p\n", |
212 | vha->host_no, __func__, i, *mb, vha)); | 214 | vha->host_no, __func__, i, *mb, vha)); |
213 | qla2x00_async_event(vha, mb); | 215 | qla2x00_async_event(vha, rsp, mb); |
214 | break; | 216 | break; |
215 | } | 217 | } |
216 | } | 218 | } |
@@ -282,8 +284,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) | |||
282 | clear_bit(RESET_ACTIVE, &vha->dpc_flags); | 284 | clear_bit(RESET_ACTIVE, &vha->dpc_flags); |
283 | } | 285 | } |
284 | 286 | ||
285 | if (atomic_read(&vha->vp_state) == VP_ACTIVE && | 287 | if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { |
286 | test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { | ||
287 | if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { | 288 | if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { |
288 | qla2x00_loop_resync(vha); | 289 | qla2x00_loop_resync(vha); |
289 | clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); | 290 | clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); |
@@ -367,7 +368,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
367 | 368 | ||
368 | host = vha->host; | 369 | host = vha->host; |
369 | fc_vport->dd_data = vha; | 370 | fc_vport->dd_data = vha; |
370 | |||
371 | /* New host info */ | 371 | /* New host info */ |
372 | u64_to_wwn(fc_vport->node_name, vha->node_name); | 372 | u64_to_wwn(fc_vport->node_name, vha->node_name); |
373 | u64_to_wwn(fc_vport->port_name, vha->port_name); | 373 | u64_to_wwn(fc_vport->port_name, vha->port_name); |
@@ -396,7 +396,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
396 | 396 | ||
397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); | 397 | qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); |
398 | 398 | ||
399 | host->can_queue = ha->req->length + 128; | 399 | memset(vha->req_ques, 0, sizeof(vha->req_ques) * QLA_MAX_HOST_QUES); |
400 | vha->req_ques[0] = ha->req_q_map[0]->id; | ||
401 | host->can_queue = ha->req_q_map[0]->length + 128; | ||
400 | host->this_id = 255; | 402 | host->this_id = 255; |
401 | host->cmd_per_lun = 3; | 403 | host->cmd_per_lun = 3; |
402 | host->max_cmd_len = MAX_CMDSZ; | 404 | host->max_cmd_len = MAX_CMDSZ; |
@@ -416,3 +418,338 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
416 | create_vhost_failed: | 418 | create_vhost_failed: |
417 | return NULL; | 419 | return NULL; |
418 | } | 420 | } |
421 | |||
422 | static void | ||
423 | qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) | ||
424 | { | ||
425 | struct qla_hw_data *ha = vha->hw; | ||
426 | uint16_t que_id = req->id; | ||
427 | |||
428 | dma_free_coherent(&ha->pdev->dev, (req->length + 1) * | ||
429 | sizeof(request_t), req->ring, req->dma); | ||
430 | req->ring = NULL; | ||
431 | req->dma = 0; | ||
432 | if (que_id) { | ||
433 | ha->req_q_map[que_id] = NULL; | ||
434 | mutex_lock(&ha->vport_lock); | ||
435 | clear_bit(que_id, ha->req_qid_map); | ||
436 | mutex_unlock(&ha->vport_lock); | ||
437 | } | ||
438 | kfree(req); | ||
439 | req = NULL; | ||
440 | } | ||
441 | |||
442 | static void | ||
443 | qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | ||
444 | { | ||
445 | struct qla_hw_data *ha = vha->hw; | ||
446 | uint16_t que_id = rsp->id; | ||
447 | |||
448 | if (rsp->msix && rsp->msix->have_irq) { | ||
449 | free_irq(rsp->msix->vector, rsp); | ||
450 | rsp->msix->have_irq = 0; | ||
451 | rsp->msix->rsp = NULL; | ||
452 | } | ||
453 | dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * | ||
454 | sizeof(response_t), rsp->ring, rsp->dma); | ||
455 | rsp->ring = NULL; | ||
456 | rsp->dma = 0; | ||
457 | if (que_id) { | ||
458 | ha->rsp_q_map[que_id] = NULL; | ||
459 | mutex_lock(&ha->vport_lock); | ||
460 | clear_bit(que_id, ha->rsp_qid_map); | ||
461 | mutex_unlock(&ha->vport_lock); | ||
462 | } | ||
463 | kfree(rsp); | ||
464 | rsp = NULL; | ||
465 | } | ||
466 | |||
467 | int | ||
468 | qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) | ||
469 | { | ||
470 | int ret = -1; | ||
471 | |||
472 | if (req) { | ||
473 | req->options |= BIT_0; | ||
474 | ret = qla25xx_init_req_que(vha, req, req->options); | ||
475 | } | ||
476 | if (ret == QLA_SUCCESS) | ||
477 | qla25xx_free_req_que(vha, req); | ||
478 | |||
479 | return ret; | ||
480 | } | ||
481 | |||
482 | int | ||
483 | qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) | ||
484 | { | ||
485 | int ret = -1; | ||
486 | |||
487 | if (rsp) { | ||
488 | rsp->options |= BIT_0; | ||
489 | ret = qla25xx_init_rsp_que(vha, rsp, rsp->options); | ||
490 | } | ||
491 | if (ret == QLA_SUCCESS) | ||
492 | qla25xx_free_rsp_que(vha, rsp); | ||
493 | |||
494 | return ret; | ||
495 | } | ||
496 | |||
497 | int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) | ||
498 | { | ||
499 | int ret = 0; | ||
500 | struct qla_hw_data *ha = vha->hw; | ||
501 | struct req_que *req = ha->req_q_map[que]; | ||
502 | |||
503 | req->options |= BIT_3; | ||
504 | req->qos = qos; | ||
505 | ret = qla25xx_init_req_que(vha, req, req->options); | ||
506 | if (ret != QLA_SUCCESS) | ||
507 | DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__)); | ||
508 | /* restore options bit */ | ||
509 | req->options &= ~BIT_3; | ||
510 | return ret; | ||
511 | } | ||
512 | |||
513 | |||
514 | /* Delete all queues for a given vhost */ | ||
515 | int | ||
516 | qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) | ||
517 | { | ||
518 | int cnt, ret = 0; | ||
519 | struct req_que *req = NULL; | ||
520 | struct rsp_que *rsp = NULL; | ||
521 | struct qla_hw_data *ha = vha->hw; | ||
522 | |||
523 | if (que_no) { | ||
524 | /* Delete request queue */ | ||
525 | req = ha->req_q_map[que_no]; | ||
526 | if (req) { | ||
527 | rsp = req->rsp; | ||
528 | ret = qla25xx_delete_req_que(vha, req); | ||
529 | if (ret != QLA_SUCCESS) { | ||
530 | qla_printk(KERN_WARNING, ha, | ||
531 | "Couldn't delete req que %d\n", req->id); | ||
532 | return ret; | ||
533 | } | ||
534 | /* Delete associated response queue */ | ||
535 | if (rsp) { | ||
536 | ret = qla25xx_delete_rsp_que(vha, rsp); | ||
537 | if (ret != QLA_SUCCESS) { | ||
538 | qla_printk(KERN_WARNING, ha, | ||
539 | "Couldn't delete rsp que %d\n", | ||
540 | rsp->id); | ||
541 | return ret; | ||
542 | } | ||
543 | } | ||
544 | } | ||
545 | } else { /* delete all queues of this host */ | ||
546 | for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { | ||
547 | /* Delete request queues */ | ||
548 | req = ha->req_q_map[vha->req_ques[cnt]]; | ||
549 | if (req && req->id) { | ||
550 | rsp = req->rsp; | ||
551 | ret = qla25xx_delete_req_que(vha, req); | ||
552 | if (ret != QLA_SUCCESS) { | ||
553 | qla_printk(KERN_WARNING, ha, | ||
554 | "Couldn't delete req que %d\n", | ||
555 | vha->req_ques[cnt]); | ||
556 | return ret; | ||
557 | } | ||
558 | vha->req_ques[cnt] = ha->req_q_map[0]->id; | ||
559 | /* Delete associated response queue */ | ||
560 | if (rsp && rsp->id) { | ||
561 | ret = qla25xx_delete_rsp_que(vha, rsp); | ||
562 | if (ret != QLA_SUCCESS) { | ||
563 | qla_printk(KERN_WARNING, ha, | ||
564 | "Couldn't delete rsp que %d\n", | ||
565 | rsp->id); | ||
566 | return ret; | ||
567 | } | ||
568 | } | ||
569 | } | ||
570 | } | ||
571 | } | ||
572 | qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n", | ||
573 | vha->vp_idx); | ||
574 | return ret; | ||
575 | } | ||
576 | |||
577 | int | ||
578 | qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, | ||
579 | uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) | ||
580 | { | ||
581 | int ret = 0; | ||
582 | struct req_que *req = NULL; | ||
583 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
584 | uint16_t que_id = 0; | ||
585 | |||
586 | req = kzalloc(sizeof(struct req_que), GFP_KERNEL); | ||
587 | if (req == NULL) { | ||
588 | qla_printk(KERN_WARNING, ha, "could not allocate memory " | ||
589 | "for request que\n"); | ||
590 | goto que_failed; | ||
591 | } | ||
592 | |||
593 | req->length = REQUEST_ENTRY_CNT_24XX; | ||
594 | req->ring = dma_alloc_coherent(&ha->pdev->dev, | ||
595 | (req->length + 1) * sizeof(request_t), | ||
596 | &req->dma, GFP_KERNEL); | ||
597 | if (req->ring == NULL) { | ||
598 | qla_printk(KERN_WARNING, ha, | ||
599 | "Memory Allocation failed - request_ring\n"); | ||
600 | goto que_failed; | ||
601 | } | ||
602 | |||
603 | mutex_lock(&ha->vport_lock); | ||
604 | que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); | ||
605 | if (que_id >= ha->max_queues) { | ||
606 | mutex_unlock(&ha->vport_lock); | ||
607 | qla_printk(KERN_INFO, ha, "No resources to create " | ||
608 | "additional request queue\n"); | ||
609 | goto que_failed; | ||
610 | } | ||
611 | set_bit(que_id, ha->req_qid_map); | ||
612 | ha->req_q_map[que_id] = req; | ||
613 | req->rid = rid; | ||
614 | req->vp_idx = vp_idx; | ||
615 | req->qos = qos; | ||
616 | |||
617 | if (ha->rsp_q_map[rsp_que]) | ||
618 | req->rsp = ha->rsp_q_map[rsp_que]; | ||
619 | /* Use alternate PCI bus number */ | ||
620 | if (MSB(req->rid)) | ||
621 | options |= BIT_4; | ||
622 | /* Use alternate PCI devfn */ | ||
623 | if (LSB(req->rid)) | ||
624 | options |= BIT_5; | ||
625 | req->options = options; | ||
626 | req->ring_ptr = req->ring; | ||
627 | req->ring_index = 0; | ||
628 | req->cnt = req->length; | ||
629 | req->id = que_id; | ||
630 | mutex_unlock(&ha->vport_lock); | ||
631 | |||
632 | ret = qla25xx_init_req_que(base_vha, req, options); | ||
633 | if (ret != QLA_SUCCESS) { | ||
634 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | ||
635 | mutex_lock(&ha->vport_lock); | ||
636 | clear_bit(que_id, ha->req_qid_map); | ||
637 | mutex_unlock(&ha->vport_lock); | ||
638 | goto que_failed; | ||
639 | } | ||
640 | |||
641 | return req->id; | ||
642 | |||
643 | que_failed: | ||
644 | qla25xx_free_req_que(base_vha, req); | ||
645 | return 0; | ||
646 | } | ||
647 | |||
648 | /* create response queue */ | ||
649 | int | ||
650 | qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, | ||
651 | uint8_t vp_idx, uint16_t rid) | ||
652 | { | ||
653 | int ret = 0; | ||
654 | struct rsp_que *rsp = NULL; | ||
655 | struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); | ||
656 | uint16_t que_id = 0; | ||
657 | |||
658 | rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); | ||
659 | if (rsp == NULL) { | ||
660 | qla_printk(KERN_WARNING, ha, "could not allocate memory for" | ||
661 | " response que\n"); | ||
662 | goto que_failed; | ||
663 | } | ||
664 | |||
665 | rsp->length = RESPONSE_ENTRY_CNT_2300; | ||
666 | rsp->ring = dma_alloc_coherent(&ha->pdev->dev, | ||
667 | (rsp->length + 1) * sizeof(response_t), | ||
668 | &rsp->dma, GFP_KERNEL); | ||
669 | if (rsp->ring == NULL) { | ||
670 | qla_printk(KERN_WARNING, ha, | ||
671 | "Memory Allocation failed - response_ring\n"); | ||
672 | goto que_failed; | ||
673 | } | ||
674 | |||
675 | mutex_lock(&ha->vport_lock); | ||
676 | que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); | ||
677 | if (que_id >= ha->max_queues) { | ||
678 | mutex_unlock(&ha->vport_lock); | ||
679 | qla_printk(KERN_INFO, ha, "No resources to create " | ||
680 | "additional response queue\n"); | ||
681 | goto que_failed; | ||
682 | } | ||
683 | set_bit(que_id, ha->rsp_qid_map); | ||
684 | |||
685 | if (ha->flags.msix_enabled) | ||
686 | rsp->msix = &ha->msix_entries[que_id + 1]; | ||
687 | else | ||
688 | qla_printk(KERN_WARNING, ha, "msix not enabled\n"); | ||
689 | |||
690 | ha->rsp_q_map[que_id] = rsp; | ||
691 | rsp->rid = rid; | ||
692 | rsp->vp_idx = vp_idx; | ||
693 | rsp->hw = ha; | ||
694 | /* Use alternate PCI bus number */ | ||
695 | if (MSB(rsp->rid)) | ||
696 | options |= BIT_4; | ||
697 | /* Use alternate PCI devfn */ | ||
698 | if (LSB(rsp->rid)) | ||
699 | options |= BIT_5; | ||
700 | rsp->options = options; | ||
701 | rsp->ring_ptr = rsp->ring; | ||
702 | rsp->ring_index = 0; | ||
703 | rsp->id = que_id; | ||
704 | mutex_unlock(&ha->vport_lock); | ||
705 | |||
706 | ret = qla25xx_request_irq(rsp); | ||
707 | if (ret) | ||
708 | goto que_failed; | ||
709 | |||
710 | ret = qla25xx_init_rsp_que(base_vha, rsp, options); | ||
711 | if (ret != QLA_SUCCESS) { | ||
712 | qla_printk(KERN_WARNING, ha, "%s failed\n", __func__); | ||
713 | mutex_lock(&ha->vport_lock); | ||
714 | clear_bit(que_id, ha->rsp_qid_map); | ||
715 | mutex_unlock(&ha->vport_lock); | ||
716 | goto que_failed; | ||
717 | } | ||
718 | |||
719 | qla2x00_init_response_q_entries(rsp); | ||
720 | |||
721 | return rsp->id; | ||
722 | |||
723 | que_failed: | ||
724 | qla25xx_free_rsp_que(base_vha, rsp); | ||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | int | ||
729 | qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos) | ||
730 | { | ||
731 | uint16_t options = 0; | ||
732 | uint8_t ret = 0; | ||
733 | struct qla_hw_data *ha = vha->hw; | ||
734 | |||
735 | options |= BIT_1; | ||
736 | ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); | ||
737 | if (!ret) { | ||
738 | qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); | ||
739 | return ret; | ||
740 | } else | ||
741 | qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); | ||
742 | |||
743 | options = 0; | ||
744 | if (qos & BIT_7) | ||
745 | options |= BIT_8; | ||
746 | ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, | ||
747 | qos & ~BIT_7); | ||
748 | if (ret) { | ||
749 | vha->req_ques[0] = ret; | ||
750 | qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); | ||
751 | } else | ||
752 | qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); | ||
753 | |||
754 | return ret; | ||
755 | } | ||
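
qla25xx_create_req_que() and qla25xx_create_rsp_que() above hand out queue numbers by scanning a qid bitmap under vport_lock: find the first clear bit below max_queues, set it, and return the slot to the pool on teardown. A small user-space sketch of just that index lifecycle (no locking, hypothetical names, byte array instead of a real bitmap):

#include <stdio.h>
#include <string.h>

#define MAX_QUEUES 8

static unsigned char qid_map[MAX_QUEUES];       /* one byte per id, 0 = free */

/* Mimics find_first_zero_bit() + set_bit(): id 0 is reserved for the
 * base queue, so real allocations start at 1. */
static int alloc_que_id(void)
{
        for (int id = 1; id < MAX_QUEUES; id++) {
                if (!qid_map[id]) {
                        qid_map[id] = 1;
                        return id;
                }
        }
        return -1;      /* no resources for an additional queue */
}

static void free_que_id(int id)
{
        if (id > 0 && id < MAX_QUEUES)
                qid_map[id] = 0;
}

int main(void)
{
        memset(qid_map, 0, sizeof(qid_map));
        qid_map[0] = 1;                 /* base req/rsp queue pair */

        int a = alloc_que_id();         /* e.g. a vport's rsp queue */
        int b = alloc_que_id();         /* and its req queue */
        printf("allocated ids %d and %d\n", a, b);

        free_que_id(a);                 /* vport deleted: ids go back to the pool */
        free_que_id(b);
        return 0;
}
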
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f6365884c97b..9142025db3d8 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable, | |||
92 | "Enables iIDMA settings " | 92 | "Enables iIDMA settings " |
93 | "Default is 1 - perform iIDMA. 0 - no iIDMA."); | 93 | "Default is 1 - perform iIDMA. 0 - no iIDMA."); |
94 | 94 | ||
95 | 95 | int ql2xmaxqueues = 1; | |
96 | module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR); | ||
97 | MODULE_PARM_DESC(ql2xmaxqueues, | ||
98 | "Enables MQ settings " | ||
99 | "Default is 1 for single queue. Set it to number \ | ||
100 | of queues in MQ mode."); | ||
96 | /* | 101 | /* |
97 | * SCSI host template entry points | 102 | * SCSI host template entry points |
98 | */ | 103 | */ |
@@ -210,11 +215,77 @@ static int qla2x00_do_dpc(void *data); | |||
210 | 215 | ||
211 | static void qla2x00_rst_aen(scsi_qla_host_t *); | 216 | static void qla2x00_rst_aen(scsi_qla_host_t *); |
212 | 217 | ||
213 | static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t); | 218 | static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, |
219 | struct req_que **, struct rsp_que **); | ||
214 | static void qla2x00_mem_free(struct qla_hw_data *); | 220 | static void qla2x00_mem_free(struct qla_hw_data *); |
215 | static void qla2x00_sp_free_dma(srb_t *); | 221 | static void qla2x00_sp_free_dma(srb_t *); |
216 | 222 | ||
217 | /* -------------------------------------------------------------------------- */ | 223 | /* -------------------------------------------------------------------------- */ |
224 | static int qla2x00_alloc_queues(struct qla_hw_data *ha) | ||
225 | { | ||
226 | ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, | ||
227 | GFP_KERNEL); | ||
228 | if (!ha->req_q_map) { | ||
229 | qla_printk(KERN_WARNING, ha, | ||
230 | "Unable to allocate memory for request queue ptrs\n"); | ||
231 | goto fail_req_map; | ||
232 | } | ||
233 | |||
234 | ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, | ||
235 | GFP_KERNEL); | ||
236 | if (!ha->rsp_q_map) { | ||
237 | qla_printk(KERN_WARNING, ha, | ||
238 | "Unable to allocate memory for response queue ptrs\n"); | ||
239 | goto fail_rsp_map; | ||
240 | } | ||
241 | set_bit(0, ha->rsp_qid_map); | ||
242 | set_bit(0, ha->req_qid_map); | ||
243 | return 1; | ||
244 | |||
245 | fail_rsp_map: | ||
246 | kfree(ha->req_q_map); | ||
247 | ha->req_q_map = NULL; | ||
248 | fail_req_map: | ||
249 | return -ENOMEM; | ||
250 | } | ||
251 | |||
252 | static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, | ||
253 | struct rsp_que *rsp) | ||
254 | { | ||
255 | if (rsp && rsp->ring) | ||
256 | dma_free_coherent(&ha->pdev->dev, | ||
257 | (rsp->length + 1) * sizeof(response_t), | ||
258 | rsp->ring, rsp->dma); | ||
259 | |||
260 | kfree(rsp); | ||
261 | rsp = NULL; | ||
262 | if (req && req->ring) | ||
263 | dma_free_coherent(&ha->pdev->dev, | ||
264 | (req->length + 1) * sizeof(request_t), | ||
265 | req->ring, req->dma); | ||
266 | |||
267 | kfree(req); | ||
268 | req = NULL; | ||
269 | } | ||
270 | |||
271 | static void qla2x00_free_queues(struct qla_hw_data *ha) | ||
272 | { | ||
273 | struct req_que *req; | ||
274 | struct rsp_que *rsp; | ||
275 | int cnt; | ||
276 | |||
277 | for (cnt = 0; cnt < ha->max_queues; cnt++) { | ||
278 | rsp = ha->rsp_q_map[cnt]; | ||
279 | req = ha->req_q_map[cnt]; | ||
280 | qla2x00_free_que(ha, req, rsp); | ||
281 | } | ||
282 | kfree(ha->rsp_q_map); | ||
283 | ha->rsp_q_map = NULL; | ||
284 | |||
285 | kfree(ha->req_q_map); | ||
286 | ha->req_q_map = NULL; | ||
287 | } | ||
288 | |||
218 | static char * | 289 | static char * |
219 | qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) | 290 | qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) |
220 | { | 291 | { |
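
qla2x00_alloc_queues() above only allocates the arrays of request/response queue pointers (sized by max_queues) and reserves id 0; the base queue pair itself is created elsewhere and plugged into slot 0 later in qla2x00_probe_one(). A user-space sketch of that two-step setup, with placeholder struct names standing in for the real queue types:

#include <stdio.h>
#include <stdlib.h>

/* Placeholder queue types; the real ones carry rings, DMA handles, etc. */
struct req_que { int id; };
struct rsp_que { int id; };

struct hw_data {
        int max_queues;
        struct req_que **req_q_map;
        struct rsp_que **rsp_q_map;
};

/* Step 1: allocate only the pointer arrays, one slot per possible queue. */
static int alloc_queues(struct hw_data *ha)
{
        ha->req_q_map = calloc(ha->max_queues, sizeof(*ha->req_q_map));
        ha->rsp_q_map = calloc(ha->max_queues, sizeof(*ha->rsp_q_map));
        if (!ha->req_q_map || !ha->rsp_q_map) {
                free(ha->req_q_map);
                free(ha->rsp_q_map);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct hw_data ha = { .max_queues = 4 };
        static struct req_que base_req = { .id = 0 };
        static struct rsp_que base_rsp = { .id = 0 };

        if (alloc_queues(&ha))
                return 1;

        /* Step 2: the already-created base pair takes slot 0. */
        ha.req_q_map[0] = &base_req;
        ha.rsp_q_map[0] = &base_rsp;

        printf("slot 0 -> req %d / rsp %d, %d slots total\n",
               ha.req_q_map[0]->id, ha.rsp_q_map[0]->id, ha.max_queues);
        free(ha.req_q_map);
        free(ha.rsp_q_map);
        return 0;
}
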
@@ -629,34 +700,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha) | |||
629 | void | 700 | void |
630 | qla2x00_abort_fcport_cmds(fc_port_t *fcport) | 701 | qla2x00_abort_fcport_cmds(fc_port_t *fcport) |
631 | { | 702 | { |
632 | int cnt; | 703 | int cnt, que, id; |
633 | unsigned long flags; | 704 | unsigned long flags; |
634 | srb_t *sp; | 705 | srb_t *sp; |
635 | scsi_qla_host_t *vha = fcport->vha; | 706 | scsi_qla_host_t *vha = fcport->vha; |
636 | struct qla_hw_data *ha = vha->hw; | 707 | struct qla_hw_data *ha = vha->hw; |
637 | struct req_que *req = ha->req; | 708 | struct req_que *req; |
638 | 709 | ||
639 | spin_lock_irqsave(&ha->hardware_lock, flags); | 710 | spin_lock_irqsave(&ha->hardware_lock, flags); |
640 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 711 | for (que = 0; que < QLA_MAX_HOST_QUES; que++) { |
641 | sp = req->outstanding_cmds[cnt]; | 712 | id = vha->req_ques[que]; |
642 | if (!sp) | 713 | req = ha->req_q_map[id]; |
643 | continue; | 714 | if (!req) |
644 | if (sp->fcport != fcport) | ||
645 | continue; | 715 | continue; |
716 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | ||
717 | sp = req->outstanding_cmds[cnt]; | ||
718 | if (!sp) | ||
719 | continue; | ||
720 | if (sp->fcport != fcport) | ||
721 | continue; | ||
646 | 722 | ||
647 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 723 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
648 | if (ha->isp_ops->abort_command(vha, sp)) { | 724 | if (ha->isp_ops->abort_command(vha, sp, req)) { |
649 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
650 | "Abort failed -- %lx\n", sp->cmd->serial_number)); | ||
651 | } else { | ||
652 | if (qla2x00_eh_wait_on_command(sp->cmd) != | ||
653 | QLA_SUCCESS) | ||
654 | DEBUG2(qla_printk(KERN_WARNING, ha, | 725 | DEBUG2(qla_printk(KERN_WARNING, ha, |
655 | "Abort failed while waiting -- %lx\n", | 726 | "Abort failed -- %lx\n", |
656 | sp->cmd->serial_number)); | 727 | sp->cmd->serial_number)); |
657 | 728 | } else { | |
729 | if (qla2x00_eh_wait_on_command(sp->cmd) != | ||
730 | QLA_SUCCESS) | ||
731 | DEBUG2(qla_printk(KERN_WARNING, ha, | ||
732 | "Abort failed while waiting -- %lx\n", | ||
733 | sp->cmd->serial_number)); | ||
734 | } | ||
735 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
658 | } | 736 | } |
659 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
660 | } | 737 | } |
661 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 738 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
662 | } | 739 | } |
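
With MQ, a vhost can own several request queues (vha->req_ques[]), so qla2x00_abort_fcport_cmds() above, and the eh_abort/wait helpers that follow, switch from one flat scan of outstanding_cmds[] to a nested walk: outer loop over the host's queue ids, inner loop over each queue's command slots, skipping unused ids. A compact user-space sketch of that traversal with toy data (struct fields and sizes are illustrative only):

#include <stdio.h>

#define QLA_MAX_HOST_QUES 4
#define MAX_OUTSTANDING_COMMANDS 8      /* tiny, just for the demo */

struct srb { int target; };
struct req_que {
        int id;
        struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

int main(void)
{
        struct srb cmd_a = { .target = 1 }, cmd_b = { .target = 2 };
        struct req_que q1 = { .id = 1 }, q3 = { .id = 3 };
        struct req_que *req_q_map[QLA_MAX_HOST_QUES + 1] = { 0 };
        int req_ques[QLA_MAX_HOST_QUES] = { 1, 3, 0, 0 };   /* this vhost's queues */

        req_q_map[1] = &q1;
        req_q_map[3] = &q3;
        q1.outstanding_cmds[2] = &cmd_a;
        q3.outstanding_cmds[5] = &cmd_b;

        /* Outer loop: the vhost's queue ids; inner loop: command slots. */
        for (int que = 0; que < QLA_MAX_HOST_QUES; que++) {
                struct req_que *req = req_q_map[req_ques[que]];
                if (!req)
                        continue;
                for (int cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
                        struct srb *sp = req->outstanding_cmds[cnt];
                        if (!sp)
                                continue;
                        printf("queue %d slot %d -> target %d\n",
                               req->id, cnt, sp->target);
                }
        }
        return 0;
}
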
@@ -698,13 +775,13 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
698 | { | 775 | { |
699 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); | 776 | scsi_qla_host_t *vha = shost_priv(cmd->device->host); |
700 | srb_t *sp; | 777 | srb_t *sp; |
701 | int ret, i; | 778 | int ret, i, que; |
702 | unsigned int id, lun; | 779 | unsigned int id, lun; |
703 | unsigned long serial; | 780 | unsigned long serial; |
704 | unsigned long flags; | 781 | unsigned long flags; |
705 | int wait = 0; | 782 | int wait = 0; |
706 | struct qla_hw_data *ha = vha->hw; | 783 | struct qla_hw_data *ha = vha->hw; |
707 | struct req_que *req = ha->req; | 784 | struct req_que *req; |
708 | 785 | ||
709 | qla2x00_block_error_handler(cmd); | 786 | qla2x00_block_error_handler(cmd); |
710 | 787 | ||
@@ -719,31 +796,34 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) | |||
719 | 796 | ||
720 | /* Check active list for command command. */ | 797 | /* Check active list for command command. */ |
721 | spin_lock_irqsave(&ha->hardware_lock, flags); | 798 | spin_lock_irqsave(&ha->hardware_lock, flags); |
722 | for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { | 799 | for (que = 0; que < QLA_MAX_HOST_QUES; que++) { |
723 | sp = req->outstanding_cmds[i]; | 800 | req = ha->req_q_map[vha->req_ques[que]]; |
724 | 801 | if (!req) | |
725 | if (sp == NULL) | ||
726 | continue; | 802 | continue; |
803 | for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { | ||
804 | sp = req->outstanding_cmds[i]; | ||
727 | 805 | ||
728 | if (sp->cmd != cmd) | 806 | if (sp == NULL) |
729 | continue; | 807 | continue; |
730 | 808 | ||
731 | DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", | 809 | if (sp->cmd != cmd) |
732 | __func__, vha->host_no, sp, serial)); | 810 | continue; |
733 | 811 | ||
734 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 812 | DEBUG2(printk("%s(%ld): aborting sp %p from RISC." |
735 | if (ha->isp_ops->abort_command(vha, sp)) { | 813 | " pid=%ld.\n", __func__, vha->host_no, sp, serial)); |
736 | DEBUG2(printk("%s(%ld): abort_command " | ||
737 | "mbx failed.\n", __func__, vha->host_no)); | ||
738 | ret = FAILED; | ||
739 | } else { | ||
740 | DEBUG3(printk("%s(%ld): abort_command " | ||
741 | "mbx success.\n", __func__, vha->host_no)); | ||
742 | wait = 1; | ||
743 | } | ||
744 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
745 | 814 | ||
746 | break; | 815 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
816 | if (ha->isp_ops->abort_command(vha, sp, req)) { | ||
817 | DEBUG2(printk("%s(%ld): abort_command " | ||
818 | "mbx failed.\n", __func__, vha->host_no)); | ||
819 | } else { | ||
820 | DEBUG3(printk("%s(%ld): abort_command " | ||
821 | "mbx success.\n", __func__, vha->host_no)); | ||
822 | wait = 1; | ||
823 | } | ||
824 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
825 | break; | ||
826 | } | ||
747 | } | 827 | } |
748 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 828 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
749 | 829 | ||
@@ -774,41 +854,46 @@ static int | |||
774 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, | 854 | qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, |
775 | unsigned int l, enum nexus_wait_type type) | 855 | unsigned int l, enum nexus_wait_type type) |
776 | { | 856 | { |
777 | int cnt, match, status; | 857 | int cnt, match, status, que; |
778 | srb_t *sp; | 858 | srb_t *sp; |
779 | unsigned long flags; | 859 | unsigned long flags; |
780 | struct qla_hw_data *ha = vha->hw; | 860 | struct qla_hw_data *ha = vha->hw; |
781 | struct req_que *req = ha->req; | 861 | struct req_que *req; |
782 | 862 | ||
783 | status = QLA_SUCCESS; | 863 | status = QLA_SUCCESS; |
784 | spin_lock_irqsave(&ha->hardware_lock, flags); | 864 | spin_lock_irqsave(&ha->hardware_lock, flags); |
785 | for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; | 865 | for (que = 0; que < QLA_MAX_HOST_QUES; que++) { |
786 | cnt++) { | 866 | req = ha->req_q_map[vha->req_ques[que]]; |
787 | sp = req->outstanding_cmds[cnt]; | 867 | if (!req) |
788 | if (!sp) | ||
789 | continue; | 868 | continue; |
869 | for (cnt = 1; status == QLA_SUCCESS && | ||
870 | cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | ||
871 | sp = req->outstanding_cmds[cnt]; | ||
872 | if (!sp) | ||
873 | continue; | ||
790 | 874 | ||
791 | if (vha->vp_idx != sp->fcport->vha->vp_idx) | 875 | if (vha->vp_idx != sp->fcport->vha->vp_idx) |
792 | continue; | 876 | continue; |
793 | match = 0; | 877 | match = 0; |
794 | switch (type) { | 878 | switch (type) { |
795 | case WAIT_HOST: | 879 | case WAIT_HOST: |
796 | match = 1; | 880 | match = 1; |
797 | break; | 881 | break; |
798 | case WAIT_TARGET: | 882 | case WAIT_TARGET: |
799 | match = sp->cmd->device->id == t; | 883 | match = sp->cmd->device->id == t; |
800 | break; | 884 | break; |
801 | case WAIT_LUN: | 885 | case WAIT_LUN: |
802 | match = (sp->cmd->device->id == t && | 886 | match = (sp->cmd->device->id == t && |
803 | sp->cmd->device->lun == l); | 887 | sp->cmd->device->lun == l); |
804 | break; | 888 | break; |
805 | } | 889 | } |
806 | if (!match) | 890 | if (!match) |
807 | continue; | 891 | continue; |
808 | 892 | ||
809 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 893 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
810 | status = qla2x00_eh_wait_on_command(sp->cmd); | 894 | status = qla2x00_eh_wait_on_command(sp->cmd); |
811 | spin_lock_irqsave(&ha->hardware_lock, flags); | 895 | spin_lock_irqsave(&ha->hardware_lock, flags); |
896 | } | ||
812 | } | 897 | } |
813 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 898 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
814 | 899 | ||
@@ -1074,7 +1159,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1074 | } | 1159 | } |
1075 | } | 1160 | } |
1076 | } | 1161 | } |
1077 | |||
1078 | /* Issue marker command only when we are going to start the I/O */ | 1162 | /* Issue marker command only when we are going to start the I/O */ |
1079 | vha->marker_needed = 1; | 1163 | vha->marker_needed = 1; |
1080 | 1164 | ||
@@ -1084,19 +1168,24 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
1084 | void | 1168 | void |
1085 | qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | 1169 | qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) |
1086 | { | 1170 | { |
1087 | int cnt; | 1171 | int que, cnt; |
1088 | unsigned long flags; | 1172 | unsigned long flags; |
1089 | srb_t *sp; | 1173 | srb_t *sp; |
1090 | struct qla_hw_data *ha = vha->hw; | 1174 | struct qla_hw_data *ha = vha->hw; |
1091 | struct req_que *req = ha->req; | 1175 | struct req_que *req; |
1092 | 1176 | ||
1093 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1177 | spin_lock_irqsave(&ha->hardware_lock, flags); |
1094 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { | 1178 | for (que = 0; que < QLA_MAX_HOST_QUES; que++) { |
1095 | sp = req->outstanding_cmds[cnt]; | 1179 | req = ha->req_q_map[vha->req_ques[que]]; |
1096 | if (sp) { | 1180 | if (!req) |
1097 | req->outstanding_cmds[cnt] = NULL; | 1181 | continue; |
1098 | sp->cmd->result = res; | 1182 | for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { |
1099 | qla2x00_sp_compl(vha, sp); | 1183 | sp = req->outstanding_cmds[cnt]; |
1184 | if (sp && sp->vha == vha) { | ||
1185 | req->outstanding_cmds[cnt] = NULL; | ||
1186 | sp->cmd->result = res; | ||
1187 | qla2x00_sp_compl(ha, sp); | ||
1188 | } | ||
1100 | } | 1189 | } |
1101 | } | 1190 | } |
1102 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1191 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
@@ -1121,11 +1210,12 @@ qla2xxx_slave_configure(struct scsi_device *sdev) | |||
1121 | scsi_qla_host_t *vha = shost_priv(sdev->host); | 1210 | scsi_qla_host_t *vha = shost_priv(sdev->host); |
1122 | struct qla_hw_data *ha = vha->hw; | 1211 | struct qla_hw_data *ha = vha->hw; |
1123 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); | 1212 | struct fc_rport *rport = starget_to_rport(sdev->sdev_target); |
1213 | struct req_que *req = ha->req_q_map[0]; | ||
1124 | 1214 | ||
1125 | if (sdev->tagged_supported) | 1215 | if (sdev->tagged_supported) |
1126 | scsi_activate_tcq(sdev, ha->req->max_q_depth); | 1216 | scsi_activate_tcq(sdev, req->max_q_depth); |
1127 | else | 1217 | else |
1128 | scsi_deactivate_tcq(sdev, ha->req->max_q_depth); | 1218 | scsi_deactivate_tcq(sdev, req->max_q_depth); |
1129 | 1219 | ||
1130 | rport->dev_loss_tmo = ha->port_down_retry_count; | 1220 | rport->dev_loss_tmo = ha->port_down_retry_count; |
1131 | 1221 | ||
@@ -1471,6 +1561,7 @@ static int | |||
1471 | qla2x00_iospace_config(struct qla_hw_data *ha) | 1561 | qla2x00_iospace_config(struct qla_hw_data *ha) |
1472 | { | 1562 | { |
1473 | resource_size_t pio; | 1563 | resource_size_t pio; |
1564 | uint16_t msix; | ||
1474 | 1565 | ||
1475 | if (pci_request_selected_regions(ha->pdev, ha->bars, | 1566 | if (pci_request_selected_regions(ha->pdev, ha->bars, |
1476 | QLA2XXX_DRIVER_NAME)) { | 1567 | QLA2XXX_DRIVER_NAME)) { |
@@ -1523,6 +1614,29 @@ skip_pio: | |||
1523 | goto iospace_error_exit; | 1614 | goto iospace_error_exit; |
1524 | } | 1615 | } |
1525 | 1616 | ||
1617 | /* Determine queue resources */ | ||
1618 | ha->max_queues = 1; | ||
1619 | if (ql2xmaxqueues > 1) { | ||
1620 | ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), | ||
1621 | pci_resource_len(ha->pdev, 3)); | ||
1622 | if (ha->mqiobase) { | ||
1623 | /* Read MSIX vector size of the board */ | ||
1624 | pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, | ||
1625 | &msix); | ||
1626 | ha->msix_count = msix; | ||
1627 | /* Max queues are bounded by available msix vectors */ | ||
1628 | /* queue 0 uses two msix vectors */ | ||
1629 | if (ha->msix_count - 1 < ql2xmaxqueues) | ||
1630 | ha->max_queues = ha->msix_count - 1; | ||
1631 | else if (ql2xmaxqueues > QLA_MQ_SIZE) | ||
1632 | ha->max_queues = QLA_MQ_SIZE; | ||
1633 | else | ||
1634 | ha->max_queues = ql2xmaxqueues; | ||
1635 | qla_printk(KERN_INFO, ha, | ||
1636 | "MSI-X vector count: %d\n", msix); | ||
1637 | } | ||
1638 | } | ||
1639 | ha->msix_count = ha->max_queues + 1; | ||
1526 | return (0); | 1640 | return (0); |
1527 | 1641 | ||
1528 | iospace_error_exit: | 1642 | iospace_error_exit: |
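
The queue-resource probe added to qla2x00_iospace_config() above boils down to a clamp: queue 0 consumes an extra vector for the default handler, so the usable queue count is bounded by ql2xmaxqueues, by msix_count - 1, and by QLA_MQ_SIZE, after which msix_count is rewritten as max_queues + 1. A sketch of just that arithmetic, leaving out the ioremap and PCI config read (the QLA_MQ_SIZE value here is assumed):

#include <stdio.h>

#define QLA_MQ_SIZE 32  /* assumed upper bound on queue pairs */

static int compute_max_queues(int ql2xmaxqueues, int msix_count)
{
        int max_queues = 1;     /* default: single queue, no MQ */

        if (ql2xmaxqueues > 1) {
                /* Queue 0 needs two vectors (default + rsp), so only
                 * msix_count - 1 queues can get a vector of their own. */
                if (msix_count - 1 < ql2xmaxqueues)
                        max_queues = msix_count - 1;
                else if (ql2xmaxqueues > QLA_MQ_SIZE)
                        max_queues = QLA_MQ_SIZE;
                else
                        max_queues = ql2xmaxqueues;
        }
        return max_queues;
}

int main(void)
{
        int msix = 5;           /* vector count reported by the board */
        int wanted = 8;         /* ql2xmaxqueues module parameter */
        int max_queues = compute_max_queues(wanted, msix);

        /* Mirrors ha->msix_count = ha->max_queues + 1 after the probe. */
        printf("wanted %d queues, %d vectors -> %d queues, msix_count %d\n",
               wanted, msix, max_queues, max_queues + 1);
        return 0;
}
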
@@ -1568,6 +1682,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1568 | struct scsi_host_template *sht; | 1682 | struct scsi_host_template *sht; |
1569 | int bars, max_id, mem_only = 0; | 1683 | int bars, max_id, mem_only = 0; |
1570 | uint16_t req_length = 0, rsp_length = 0; | 1684 | uint16_t req_length = 0, rsp_length = 0; |
1685 | struct req_que *req = NULL; | ||
1686 | struct rsp_que *rsp = NULL; | ||
1571 | 1687 | ||
1572 | bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); | 1688 | bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); |
1573 | sht = &qla2x00_driver_template; | 1689 | sht = &qla2x00_driver_template; |
@@ -1655,6 +1771,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1655 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); | 1771 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); |
1656 | ha->gid_list_info_size = 8; | 1772 | ha->gid_list_info_size = 8; |
1657 | ha->optrom_size = OPTROM_SIZE_24XX; | 1773 | ha->optrom_size = OPTROM_SIZE_24XX; |
1774 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX; | ||
1658 | ha->isp_ops = &qla24xx_isp_ops; | 1775 | ha->isp_ops = &qla24xx_isp_ops; |
1659 | } else if (IS_QLA25XX(ha)) { | 1776 | } else if (IS_QLA25XX(ha)) { |
1660 | ha->mbx_count = MAILBOX_REGISTER_COUNT; | 1777 | ha->mbx_count = MAILBOX_REGISTER_COUNT; |
@@ -1664,6 +1781,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1664 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); | 1781 | ha->init_cb_size = sizeof(struct mid_init_cb_24xx); |
1665 | ha->gid_list_info_size = 8; | 1782 | ha->gid_list_info_size = 8; |
1666 | ha->optrom_size = OPTROM_SIZE_25XX; | 1783 | ha->optrom_size = OPTROM_SIZE_25XX; |
1784 | ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; | ||
1667 | ha->isp_ops = &qla25xx_isp_ops; | 1785 | ha->isp_ops = &qla25xx_isp_ops; |
1668 | } | 1786 | } |
1669 | 1787 | ||
@@ -1674,7 +1792,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1674 | 1792 | ||
1675 | set_bit(0, (unsigned long *) ha->vp_idx_map); | 1793 | set_bit(0, (unsigned long *) ha->vp_idx_map); |
1676 | 1794 | ||
1677 | ret = qla2x00_mem_alloc(ha, req_length, rsp_length); | 1795 | ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); |
1678 | if (!ret) { | 1796 | if (!ret) { |
1679 | qla_printk(KERN_WARNING, ha, | 1797 | qla_printk(KERN_WARNING, ha, |
1680 | "[ERROR] Failed to allocate memory for adapter\n"); | 1798 | "[ERROR] Failed to allocate memory for adapter\n"); |
@@ -1682,9 +1800,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1682 | goto probe_hw_failed; | 1800 | goto probe_hw_failed; |
1683 | } | 1801 | } |
1684 | 1802 | ||
1685 | ha->req->max_q_depth = MAX_Q_DEPTH; | 1803 | req->max_q_depth = MAX_Q_DEPTH; |
1686 | if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) | 1804 | if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) |
1687 | ha->req->max_q_depth = ql2xmaxqdepth; | 1805 | req->max_q_depth = ql2xmaxqdepth; |
1806 | |||
1688 | 1807 | ||
1689 | base_vha = qla2x00_create_host(sht, ha); | 1808 | base_vha = qla2x00_create_host(sht, ha); |
1690 | if (!base_vha) { | 1809 | if (!base_vha) { |
@@ -1700,13 +1819,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1700 | qla2x00_config_dma_addressing(base_vha); | 1819 | qla2x00_config_dma_addressing(base_vha); |
1701 | 1820 | ||
1702 | host = base_vha->host; | 1821 | host = base_vha->host; |
1703 | host->can_queue = ha->req->length + 128; | 1822 | base_vha->req_ques[0] = req->id; |
1704 | if (IS_QLA2XXX_MIDTYPE(ha)) { | 1823 | host->can_queue = req->length + 128; |
1824 | if (IS_QLA2XXX_MIDTYPE(ha)) | ||
1705 | base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; | 1825 | base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; |
1706 | } else { | 1826 | else |
1707 | base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + | 1827 | base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + |
1708 | base_vha->vp_idx; | 1828 | base_vha->vp_idx; |
1709 | } | ||
1710 | if (IS_QLA2100(ha)) | 1829 | if (IS_QLA2100(ha)) |
1711 | host->sg_tablesize = 32; | 1830 | host->sg_tablesize = 32; |
1712 | host->max_id = max_id; | 1831 | host->max_id = max_id; |
@@ -1718,6 +1837,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1718 | host->max_lun = MAX_LUNS; | 1837 | host->max_lun = MAX_LUNS; |
1719 | host->transportt = qla2xxx_transport_template; | 1838 | host->transportt = qla2xxx_transport_template; |
1720 | 1839 | ||
1840 | /* Set up the irqs */ | ||
1841 | ret = qla2x00_request_irqs(ha, rsp); | ||
1842 | if (ret) | ||
1843 | goto probe_failed; | ||
1844 | |||
1845 | /* Alloc arrays of request and response ring ptrs */ | ||
1846 | if (!qla2x00_alloc_queues(ha)) { | ||
1847 | qla_printk(KERN_WARNING, ha, | ||
1848 | "[ERROR] Failed to allocate memory for queue" | ||
1849 | " pointers\n"); | ||
1850 | goto probe_failed; | ||
1851 | } | ||
1852 | ha->rsp_q_map[0] = rsp; | ||
1853 | ha->req_q_map[0] = req; | ||
1854 | |||
1721 | if (qla2x00_initialize_adapter(base_vha)) { | 1855 | if (qla2x00_initialize_adapter(base_vha)) { |
1722 | qla_printk(KERN_WARNING, ha, | 1856 | qla_printk(KERN_WARNING, ha, |
1723 | "Failed to initialize adapter\n"); | 1857 | "Failed to initialize adapter\n"); |
@@ -1730,11 +1864,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1730 | goto probe_failed; | 1864 | goto probe_failed; |
1731 | } | 1865 | } |
1732 | 1866 | ||
1733 | /* Set up the irqs */ | ||
1734 | ret = qla2x00_request_irqs(ha); | ||
1735 | if (ret) | ||
1736 | goto probe_failed; | ||
1737 | |||
1738 | /* | 1867 | /* |
1739 | * Startup the kernel thread for this host adapter | 1868 | * Startup the kernel thread for this host adapter |
1740 | */ | 1869 | */ |
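The probe-path hunks above rework the setup order: the base request/response pair now comes back from qla2x00_mem_alloc() through out-parameters, the IRQs are requested against the response queue that will service them, the queue-pointer maps are allocated, and the base pair is published at index 0, all before qla2x00_initialize_adapter() runs. A minimal sketch of that sequence in isolation; the helper name is illustrative only and error handling is trimmed to the calls visible in the diff.

static int example_setup_base_queues(struct qla_hw_data *ha,
	uint16_t req_length, uint16_t rsp_length)
{
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Base ring pair is returned via out-parameters, not ha->req/ha->rsp. */
	if (!qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp))
		return -ENOMEM;

	/* IRQs are now tied to the response queue before adapter init. */
	if (qla2x00_request_irqs(ha, rsp))
		goto fail;

	/* Arrays of request/response ring pointers, indexed by queue id. */
	if (!qla2x00_alloc_queues(ha))
		goto fail;	/* note: the IRQs would also need releasing here */

	ha->req_q_map[0] = req;
	ha->rsp_q_map[0] = rsp;
	return 0;

fail:
	qla2x00_free_que(ha, req, rsp);
	return -ENOMEM;
}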
@@ -1786,6 +1915,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1786 | return 0; | 1915 | return 0; |
1787 | 1916 | ||
1788 | probe_failed: | 1917 | probe_failed: |
1918 | qla2x00_free_que(ha, req, rsp); | ||
1789 | qla2x00_free_device(base_vha); | 1919 | qla2x00_free_device(base_vha); |
1790 | 1920 | ||
1791 | scsi_host_put(base_vha->host); | 1921 | scsi_host_put(base_vha->host); |
@@ -1836,6 +1966,9 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
1836 | if (ha->iobase) | 1966 | if (ha->iobase) |
1837 | iounmap(ha->iobase); | 1967 | iounmap(ha->iobase); |
1838 | 1968 | ||
1969 | if (ha->mqiobase) | ||
1970 | iounmap(ha->mqiobase); | ||
1971 | |||
1839 | pci_release_selected_regions(ha->pdev, ha->bars); | 1972 | pci_release_selected_regions(ha->pdev, ha->bars); |
1840 | kfree(ha); | 1973 | kfree(ha); |
1841 | ha = NULL; | 1974 | ha = NULL; |
@@ -1884,6 +2017,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) | |||
1884 | qla2x00_free_irqs(vha); | 2017 | qla2x00_free_irqs(vha); |
1885 | 2018 | ||
1886 | qla2x00_mem_free(ha); | 2019 | qla2x00_mem_free(ha); |
2020 | |||
2021 | qla2x00_free_queues(ha); | ||
1887 | } | 2022 | } |
1888 | 2023 | ||
1889 | static inline void | 2024 | static inline void |
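With the ring teardown removed from qla2x00_mem_free() (see the hunk further below), the new qla2x00_free_queues() call is what releases the per-queue structures during device teardown. A hedged sketch of what such a helper might look like; the loop bound and the exact ownership of the pointer maps are assumptions for illustration, not the driver's actual implementation.

#define EXAMPLE_MAX_QUEUES 32	/* assumed; the driver derives its own bound */

static void example_free_queues(struct qla_hw_data *ha)
{
	int i;

	for (i = 0; i < EXAMPLE_MAX_QUEUES; i++) {
		struct req_que *req = ha->req_q_map[i];
		struct rsp_que *rsp = ha->rsp_q_map[i];

		if (req) {
			if (req->ring)
				dma_free_coherent(&ha->pdev->dev,
				    (req->length + 1) * sizeof(request_t),
				    req->ring, req->dma);
			kfree(req);
			ha->req_q_map[i] = NULL;
		}
		if (rsp) {
			if (rsp->ring)
				dma_free_coherent(&ha->pdev->dev,
				    (rsp->length + 1) * sizeof(response_t),
				    rsp->ring, rsp->dma);
			kfree(rsp);
			ha->rsp_q_map[i] = NULL;
		}
	}
	/* The pointer maps themselves were allocated by qla2x00_alloc_queues(). */
	kfree(ha->req_q_map);
	kfree(ha->rsp_q_map);
	ha->req_q_map = NULL;
	ha->rsp_q_map = NULL;
}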
@@ -1998,11 +2133,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) | |||
1998 | * !0 = failure. | 2133 | * !0 = failure. |
1999 | */ | 2134 | */ |
2000 | static int | 2135 | static int |
2001 | qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len) | 2136 | qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, |
2137 | struct req_que **req, struct rsp_que **rsp) | ||
2002 | { | 2138 | { |
2003 | char name[16]; | 2139 | char name[16]; |
2004 | struct req_que *req = NULL; | ||
2005 | struct rsp_que *rsp = NULL; | ||
2006 | 2140 | ||
2007 | ha->init_cb_size = sizeof(init_cb_t); | 2141 | ha->init_cb_size = sizeof(init_cb_t); |
2008 | if (IS_QLA2XXX_MIDTYPE(ha)) | 2142 | if (IS_QLA2XXX_MIDTYPE(ha)) |
@@ -2055,52 +2189,67 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len) | |||
2055 | } | 2189 | } |
2056 | 2190 | ||
2057 | /* Allocate memory for request ring */ | 2191 | /* Allocate memory for request ring */ |
2058 | req = kzalloc(sizeof(struct req_que), GFP_KERNEL); | 2192 | *req = kzalloc(sizeof(struct req_que), GFP_KERNEL); |
2059 | if (!req) { | 2193 | if (!*req) { |
2060 | DEBUG(printk("Unable to allocate memory for req\n")); | 2194 | DEBUG(printk("Unable to allocate memory for req\n")); |
2061 | goto fail_req; | 2195 | goto fail_req; |
2062 | } | 2196 | } |
2063 | ha->req = req; | 2197 | (*req)->length = req_len; |
2064 | req->length = req_len; | 2198 | (*req)->ring = dma_alloc_coherent(&ha->pdev->dev, |
2065 | req->ring = dma_alloc_coherent(&ha->pdev->dev, | 2199 | ((*req)->length + 1) * sizeof(request_t), |
2066 | (req->length + 1) * sizeof(request_t), | 2200 | &(*req)->dma, GFP_KERNEL); |
2067 | &req->dma, GFP_KERNEL); | 2201 | if (!(*req)->ring) { |
2068 | if (!req->ring) { | ||
2069 | DEBUG(printk("Unable to allocate memory for req_ring\n")); | 2202 | DEBUG(printk("Unable to allocate memory for req_ring\n")); |
2070 | goto fail_req_ring; | 2203 | goto fail_req_ring; |
2071 | } | 2204 | } |
2072 | /* Allocate memory for response ring */ | 2205 | /* Allocate memory for response ring */ |
2073 | rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); | 2206 | *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); |
2074 | if (!rsp) { | 2207 | if (!*rsp) { |
2075 | DEBUG(printk("Unable to allocate memory for rsp\n")); | 2208 | qla_printk(KERN_WARNING, ha, |
2209 | "Unable to allocate memory for rsp\n"); | ||
2076 | goto fail_rsp; | 2210 | goto fail_rsp; |
2077 | } | 2211 | } |
2078 | ha->rsp = rsp; | 2212 | (*rsp)->hw = ha; |
2079 | rsp->hw = ha; | 2213 | (*rsp)->length = rsp_len; |
2080 | rsp->length = rsp_len; | 2214 | (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev, |
2081 | 2215 | ((*rsp)->length + 1) * sizeof(response_t), | |
2082 | rsp->ring = dma_alloc_coherent(&ha->pdev->dev, | 2216 | &(*rsp)->dma, GFP_KERNEL); |
2083 | (rsp->length + 1) * sizeof(response_t), | 2217 | if (!(*rsp)->ring) { |
2084 | &rsp->dma, GFP_KERNEL); | 2218 | qla_printk(KERN_WARNING, ha, |
2085 | if (!rsp->ring) { | 2219 | "Unable to allocate memory for rsp_ring\n"); |
2086 | DEBUG(printk("Unable to allocate memory for rsp_ring\n")); | ||
2087 | goto fail_rsp_ring; | 2220 | goto fail_rsp_ring; |
2088 | } | 2221 | } |
2222 | (*req)->rsp = *rsp; | ||
2223 | (*rsp)->req = *req; | ||
2224 | /* Allocate memory for NVRAM data for vports */ | ||
2225 | if (ha->nvram_npiv_size) { | ||
2226 | ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) * | ||
2227 | ha->nvram_npiv_size, GFP_KERNEL); | ||
2228 | if (!ha->npiv_info) { | ||
2229 | qla_printk(KERN_WARNING, ha, | ||
2230 | "Unable to allocate memory for npiv info\n"); | ||
2231 | goto fail_npiv_info; | ||
2232 | } | ||
2233 | } else | ||
2234 | ha->npiv_info = NULL; | ||
2089 | 2235 | ||
2090 | INIT_LIST_HEAD(&ha->vp_list); | 2236 | INIT_LIST_HEAD(&ha->vp_list); |
2091 | return 1; | 2237 | return 1; |
2092 | 2238 | ||
2239 | fail_npiv_info: | ||
2240 | dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) * | ||
2241 | sizeof(response_t), (*rsp)->ring, (*rsp)->dma); | ||
2242 | (*rsp)->ring = NULL; | ||
2243 | (*rsp)->dma = 0; | ||
2093 | fail_rsp_ring: | 2244 | fail_rsp_ring: |
2094 | kfree(rsp); | 2245 | kfree(*rsp); |
2095 | ha->rsp = NULL; | ||
2096 | fail_rsp: | 2246 | fail_rsp: |
2097 | dma_free_coherent(&ha->pdev->dev, (req->length + 1) * | 2247 | dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) * |
2098 | sizeof(request_t), req->ring, req->dma); | 2248 | sizeof(request_t), (*req)->ring, (*req)->dma); |
2099 | req->ring = NULL; | 2249 | (*req)->ring = NULL; |
2100 | req->dma = 0; | 2250 | (*req)->dma = 0; |
2101 | fail_req_ring: | 2251 | fail_req_ring: |
2102 | kfree(req); | 2252 | kfree(*req); |
2103 | ha->req = NULL; | ||
2104 | fail_req: | 2253 | fail_req: |
2105 | dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), | 2254 | dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), |
2106 | ha->ct_sns, ha->ct_sns_dma); | 2255 | ha->ct_sns, ha->ct_sns_dma); |
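A side note on the NPIV table allocation added in this hunk: the open-coded sizeof(struct qla_npiv_entry) * ha->nvram_npiv_size multiplication is equivalent to a kcalloc() call, which likewise zeroes the array and additionally fails cleanly on multiplication overflow. This is shown only as the equivalent form, not what the driver uses:

	/* Equivalent, overflow-checked form of the npiv_info allocation above. */
	ha->npiv_info = kcalloc(ha->nvram_npiv_size,
	    sizeof(struct qla_npiv_entry), GFP_KERNEL);
	if (!ha->npiv_info) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for npiv info\n");
		goto fail_npiv_info;
	}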
@@ -2144,9 +2293,6 @@ fail: | |||
2144 | static void | 2293 | static void |
2145 | qla2x00_mem_free(struct qla_hw_data *ha) | 2294 | qla2x00_mem_free(struct qla_hw_data *ha) |
2146 | { | 2295 | { |
2147 | struct req_que *req = ha->req; | ||
2148 | struct rsp_que *rsp = ha->rsp; | ||
2149 | |||
2150 | if (ha->srb_mempool) | 2296 | if (ha->srb_mempool) |
2151 | mempool_destroy(ha->srb_mempool); | 2297 | mempool_destroy(ha->srb_mempool); |
2152 | 2298 | ||
@@ -2189,6 +2335,7 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
2189 | ha->init_cb, ha->init_cb_dma); | 2335 | ha->init_cb, ha->init_cb_dma); |
2190 | vfree(ha->optrom_buffer); | 2336 | vfree(ha->optrom_buffer); |
2191 | kfree(ha->nvram); | 2337 | kfree(ha->nvram); |
2338 | kfree(ha->npiv_info); | ||
2192 | 2339 | ||
2193 | ha->srb_mempool = NULL; | 2340 | ha->srb_mempool = NULL; |
2194 | ha->eft = NULL; | 2341 | ha->eft = NULL; |
@@ -2210,26 +2357,6 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
2210 | ha->fw_dump = NULL; | 2357 | ha->fw_dump = NULL; |
2211 | ha->fw_dumped = 0; | 2358 | ha->fw_dumped = 0; |
2212 | ha->fw_dump_reading = 0; | 2359 | ha->fw_dump_reading = 0; |
2213 | |||
2214 | if (rsp) { | ||
2215 | if (rsp->ring) | ||
2216 | dma_free_coherent(&ha->pdev->dev, | ||
2217 | (rsp->length + 1) * sizeof(response_t), | ||
2218 | rsp->ring, rsp->dma); | ||
2219 | |||
2220 | kfree(rsp); | ||
2221 | rsp = NULL; | ||
2222 | } | ||
2223 | |||
2224 | if (req) { | ||
2225 | if (req->ring) | ||
2226 | dma_free_coherent(&ha->pdev->dev, | ||
2227 | (req->length + 1) * sizeof(request_t), | ||
2228 | req->ring, req->dma); | ||
2229 | |||
2230 | kfree(req); | ||
2231 | req = NULL; | ||
2232 | } | ||
2233 | } | 2360 | } |
2234 | 2361 | ||
2235 | struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, | 2362 | struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, |
@@ -2613,9 +2740,8 @@ qla2x00_sp_free_dma(srb_t *sp) | |||
2613 | } | 2740 | } |
2614 | 2741 | ||
2615 | void | 2742 | void |
2616 | qla2x00_sp_compl(scsi_qla_host_t *vha, srb_t *sp) | 2743 | qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp) |
2617 | { | 2744 | { |
2618 | struct qla_hw_data *ha = vha->hw; | ||
2619 | struct scsi_cmnd *cmd = sp->cmd; | 2745 | struct scsi_cmnd *cmd = sp->cmd; |
2620 | 2746 | ||
2621 | qla2x00_sp_free_dma(sp); | 2747 | qla2x00_sp_free_dma(sp); |
@@ -2643,7 +2769,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
2643 | srb_t *sp; | 2769 | srb_t *sp; |
2644 | int t; | 2770 | int t; |
2645 | struct qla_hw_data *ha = vha->hw; | 2771 | struct qla_hw_data *ha = vha->hw; |
2646 | struct req_que *req = ha->req; | 2772 | struct req_que *req; |
2647 | /* | 2773 | /* |
2648 | * Ports - Port down timer. | 2774 | * Ports - Port down timer. |
2649 | * | 2775 | * |
@@ -2693,6 +2819,7 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
2693 | if (!vha->vp_idx) { | 2819 | if (!vha->vp_idx) { |
2694 | spin_lock_irqsave(&ha->hardware_lock, | 2820 | spin_lock_irqsave(&ha->hardware_lock, |
2695 | cpu_flags); | 2821 | cpu_flags); |
2822 | req = ha->req_q_map[0]; | ||
2696 | for (index = 1; | 2823 | for (index = 1; |
2697 | index < MAX_OUTSTANDING_COMMANDS; | 2824 | index < MAX_OUTSTANDING_COMMANDS; |
2698 | index++) { | 2825 | index++) { |
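The timer hunk above shows the access pattern the rest of the driver now follows: code that used to dereference the single ha->req pointer fetches the queue it needs from req_q_map[], here the base queue at index 0, while holding the hardware lock. A sketch of that pattern in isolation; the outstanding_cmds[] field name is an assumption for illustration, since the loop body is not part of this hunk.

	spin_lock_irqsave(&ha->hardware_lock, cpu_flags);
	req = ha->req_q_map[0];		/* base queue of the physical port */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		sp = req->outstanding_cmds[index];	/* assumed field */
		if (!sp)
			continue;
		/* ... existing port-down/command timeout handling ... */
	}
	spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags);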
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 6d6c02129a53..c538ee1b1a31 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -547,7 +547,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) | |||
547 | uint16_t cnt, chksum, *wptr; | 547 | uint16_t cnt, chksum, *wptr; |
548 | struct qla_flt_location *fltl; | 548 | struct qla_flt_location *fltl; |
549 | struct qla_hw_data *ha = vha->hw; | 549 | struct qla_hw_data *ha = vha->hw; |
550 | struct req_que *req = ha->req; | 550 | struct req_que *req = ha->req_q_map[0]; |
551 | 551 | ||
552 | /* | 552 | /* |
553 | * FLT-location structure resides after the last PCI region. | 553 | * FLT-location structure resides after the last PCI region. |
@@ -624,7 +624,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
624 | struct qla_flt_header *flt; | 624 | struct qla_flt_header *flt; |
625 | struct qla_flt_region *region; | 625 | struct qla_flt_region *region; |
626 | struct qla_hw_data *ha = vha->hw; | 626 | struct qla_hw_data *ha = vha->hw; |
627 | struct req_que *req = ha->req; | 627 | struct req_que *req = ha->req_q_map[0]; |
628 | 628 | ||
629 | ha->flt_region_flt = flt_addr; | 629 | ha->flt_region_flt = flt_addr; |
630 | wptr = (uint16_t *)req->ring; | 630 | wptr = (uint16_t *)req->ring; |
@@ -730,7 +730,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) | |||
730 | uint8_t man_id, flash_id; | 730 | uint8_t man_id, flash_id; |
731 | uint16_t mid, fid; | 731 | uint16_t mid, fid; |
732 | struct qla_hw_data *ha = vha->hw; | 732 | struct qla_hw_data *ha = vha->hw; |
733 | struct req_que *req = ha->req; | 733 | struct req_que *req = ha->req_q_map[0]; |
734 | 734 | ||
735 | wptr = (uint16_t *)req->ring; | 735 | wptr = (uint16_t *)req->ring; |
736 | fdt = (struct qla_fdt_layout *)req->ring; | 736 | fdt = (struct qla_fdt_layout *)req->ring; |
@@ -833,6 +833,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) | |||
833 | void *data; | 833 | void *data; |
834 | uint16_t *wptr; | 834 | uint16_t *wptr; |
835 | uint16_t cnt, chksum; | 835 | uint16_t cnt, chksum; |
836 | int i; | ||
836 | struct qla_npiv_header hdr; | 837 | struct qla_npiv_header hdr; |
837 | struct qla_npiv_entry *entry; | 838 | struct qla_npiv_entry *entry; |
838 | struct qla_hw_data *ha = vha->hw; | 839 | struct qla_hw_data *ha = vha->hw; |
@@ -876,7 +877,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) | |||
876 | 877 | ||
877 | entry = data + sizeof(struct qla_npiv_header); | 878 | entry = data + sizeof(struct qla_npiv_header); |
878 | cnt = le16_to_cpu(hdr.entries); | 879 | cnt = le16_to_cpu(hdr.entries); |
879 | for ( ; cnt; cnt--, entry++) { | 880 | for (i = 0; cnt; cnt--, entry++, i++) { |
880 | uint16_t flags; | 881 | uint16_t flags; |
881 | struct fc_vport_identifiers vid; | 882 | struct fc_vport_identifiers vid; |
882 | struct fc_vport *vport; | 883 | struct fc_vport *vport; |
@@ -894,19 +895,25 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) | |||
894 | vid.port_name = wwn_to_u64(entry->port_name); | 895 | vid.port_name = wwn_to_u64(entry->port_name); |
895 | vid.node_name = wwn_to_u64(entry->node_name); | 896 | vid.node_name = wwn_to_u64(entry->node_name); |
896 | 897 | ||
898 | memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); | ||
899 | |||
897 | DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " | 900 | DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " |
898 | "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt, vid.port_name, | 901 | "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, |
899 | vid.node_name, le16_to_cpu(entry->vf_id), | 902 | vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), |
900 | le16_to_cpu(entry->qos))); | 903 | entry->q_qos, entry->f_qos)); |
901 | 904 | ||
902 | vport = fc_vport_create(vha->host, 0, &vid); | 905 | if (i < QLA_PRECONFIG_VPORTS) { |
903 | if (!vport) | 906 | vport = fc_vport_create(vha->host, 0, &vid); |
904 | qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to " | 907 | if (!vport) |
905 | "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt, | 908 | qla_printk(KERN_INFO, ha, |
906 | vid.port_name, vid.node_name); | 909 | "NPIV-Config: Failed to create vport [%02x]: " |
910 | "wwpn=%llx wwnn=%llx.\n", cnt, | ||
911 | vid.port_name, vid.node_name); | ||
912 | } | ||
907 | } | 913 | } |
908 | done: | 914 | done: |
909 | kfree(data); | 915 | kfree(data); |
916 | ha->npiv_info = NULL; | ||
910 | } | 917 | } |
911 | 918 | ||
912 | static void | 919 | static void |
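Net effect of the qla2xxx_flash_npiv_conf() changes above: every validated flash entry is copied into the ha->npiv_info[] table, while only the first QLA_PRECONFIG_VPORTS entries also trigger an immediate fc_vport_create(). A compressed sketch of that loop shape, assuming the entry count has already been checked against the size of npiv_info[]:

	for (i = 0; cnt; cnt--, entry++, i++) {
		/* Cache every entry for later use by the driver. */
		memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));

		if (i >= QLA_PRECONFIG_VPORTS)
			continue;	/* cached only; no vport created here */

		/* ... fill struct fc_vport_identifiers and call fc_vport_create() ... */
	}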
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 54b1100810b4..be22f3a09f8d 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,9 +7,9 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.02-k1" | 10 | #define QLA2XXX_VERSION "8.02.03-k1" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
14 | #define QLA_DRIVER_PATCH_VER 2 | 14 | #define QLA_DRIVER_PATCH_VER 3 |
15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |