author     Anirban Chakraborty <anirban.chakraborty@qlogic.com>    2009-04-07 01:33:40 -0400
committer  James Bottomley <James.Bottomley@HansenPartnership.com>    2009-05-20 18:21:07 -0400
commit     2afa19a9377ca61b9489e44bf50029574fbe63be (patch)
tree       cdfa3878eb04d833bbcd9ce92196bc4456b5ccf5 /drivers
parent     7640335ea5b1a2da0d64303e6003012c619ae01a (diff)
[SCSI] qla2xxx: Add QoS support.
Set the number of request queues to the module parameter ql2xmaxqueues. Each vport gets a request queue. The QoS value set on a request queue determines the priority control for IOs queued to it. If no QoS value is specified, the vports use the default queue 0.

Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
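Usage sketch (illustrative values, not part of the patch): the multi-queue behaviour is requested at module load time through the existing ql2xmaxqueues parameter, for example

    modprobe qla2xxx ql2xmaxqueues=4

after which a vport's request queue takes its QoS value from the q_qos field of the matching NVRAM NPIV entry; vports with no QoS value keep using the default request queue 0.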
Diffstat (limited to 'drivers')
-rw-r--r--    drivers/scsi/qla2xxx/qla_attr.c     47
-rw-r--r--    drivers/scsi/qla2xxx/qla_dbg.c       7
-rw-r--r--    drivers/scsi/qla2xxx/qla_def.h      23
-rw-r--r--    drivers/scsi/qla2xxx/qla_gbl.h      27
-rw-r--r--    drivers/scsi/qla2xxx/qla_init.c     30
-rw-r--r--    drivers/scsi/qla2xxx/qla_iocb.c     19
-rw-r--r--    drivers/scsi/qla2xxx/qla_isr.c     242
-rw-r--r--    drivers/scsi/qla2xxx/qla_mbx.c      63
-rw-r--r--    drivers/scsi/qla2xxx/qla_mid.c     107
-rw-r--r--    drivers/scsi/qla2xxx/qla_os.c      135
-rw-r--r--    drivers/scsi/qla2xxx/qla_sup.c       1
11 files changed, 333 insertions, 368 deletions
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5d44e3e6488c..bda6658d4fbf 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1469,11 +1469,12 @@ static int
 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 {
     int ret = 0;
-    int cnt = 0;
-    uint8_t qos = QLA_DEFAULT_QUE_QOS;
+    uint8_t qos = 0;
     scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
     scsi_qla_host_t *vha = NULL;
     struct qla_hw_data *ha = base_vha->hw;
+    uint16_t options = 0;
+    int cnt;
 
     ret = qla24xx_vport_create_req_sanity_check(fc_vport);
     if (ret) {
@@ -1529,23 +1530,35 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
     qla24xx_vport_disable(fc_vport, disable);
 
-    /* Create a queue pair for the vport */
-    if (ha->mqenable) {
-        if (ha->npiv_info) {
-            for (; cnt < ha->nvram_npiv_size; cnt++) {
-                if (ha->npiv_info[cnt].port_name ==
-                    vha->port_name &&
-                    ha->npiv_info[cnt].node_name ==
-                    vha->node_name) {
-                    qos = ha->npiv_info[cnt].q_qos;
-                    break;
-                }
-            }
-        }
-        qla25xx_create_queues(vha, qos);
-    }
+    ret = 0;
+    if (ha->cur_vport_count <= ha->flex_port_count
+        || ha->max_req_queues == 1 || !ha->npiv_info)
+        goto vport_queue;
+    /* Create a request queue in QoS mode for the vport */
+    for (cnt = ha->flex_port_count; cnt < ha->nvram_npiv_size; cnt++) {
+        if (ha->npiv_info[cnt].port_name == vha->port_name &&
+            ha->npiv_info[cnt].node_name == vha->node_name) {
+            qos = ha->npiv_info[cnt].q_qos;
+            break;
+        }
+    }
+    if (qos) {
+        ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
+            qos);
+        if (!ret)
+            qla_printk(KERN_WARNING, ha,
+                "Can't create request queue for vp_idx:%d\n",
+                vha->vp_idx);
+        else
+            DEBUG2(qla_printk(KERN_INFO, ha,
+                "Request Que:%d created for vp_idx:%d\n",
+                ret, vha->vp_idx));
+    }
 
+vport_queue:
+    vha->req = ha->req_q_map[ret];
     return 0;
+
 vport_create_failed_2:
     qla24xx_disable_vp(vha);
     qla24xx_deallocate_vp_id(vha);
@@ -1586,8 +1599,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
         vha->host_no, vha->vp_idx, vha));
     }
 
-    if (ha->mqenable) {
-        if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
+    if (vha->req->id) {
+        if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
             qla_printk(KERN_WARNING, ha,
                 "Queue delete failed.\n");
     }
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 34760f8d4f17..68671a2b8b7f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -351,7 +351,7 @@ static inline void *
 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 {
     uint32_t cnt, que_idx;
-    uint8_t req_cnt, rsp_cnt, que_cnt;
+    uint8_t que_cnt;
     struct qla2xxx_mq_chain *mq = ptr;
     struct device_reg_25xxmq __iomem *reg;
 
@@ -363,9 +363,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
     mq->type = __constant_htonl(DUMP_CHAIN_MQ);
     mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
 
-    req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
-    rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
-    que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+    que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
+        ha->max_req_queues : ha->max_rsp_queues;
     mq->count = htonl(que_cnt);
     for (cnt = 0; cnt < que_cnt; cnt++) {
         reg = (struct device_reg_25xxmq *) ((void *)
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 645cfd9e6cf6..57d659cf99ee 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -93,6 +93,7 @@
 #define LSD(x)    ((uint32_t)((uint64_t)(x)))
 #define MSD(x)    ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
 
+#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
 
 /*
  * I/O register
@@ -179,6 +180,7 @@
 #define REQUEST_ENTRY_CNT_24XX    2048    /* Number of request entries. */
 #define RESPONSE_ENTRY_CNT_2100   64      /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300   512     /* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_MQ     128     /* Number of response entries.*/
 
 struct req_que;
 
@@ -2008,7 +2010,8 @@ typedef struct vport_params {
 #define VP_RET_CODE_NOT_FOUND    6
 
 struct qla_hw_data;
-
+struct req_que;
+struct rsp_que;
 /*
  * ISP operations
  */
@@ -2030,10 +2033,9 @@ struct isp_operations {
     void (*enable_intrs) (struct qla_hw_data *);
     void (*disable_intrs) (struct qla_hw_data *);
 
-    int (*abort_command) (struct scsi_qla_host *, srb_t *,
-        struct req_que *);
-    int (*target_reset) (struct fc_port *, unsigned int);
-    int (*lun_reset) (struct fc_port *, unsigned int);
+    int (*abort_command) (srb_t *);
+    int (*target_reset) (struct fc_port *, unsigned int, int);
+    int (*lun_reset) (struct fc_port *, unsigned int, int);
     int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
         uint8_t, uint8_t, uint16_t *, uint8_t);
     int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2081,6 @@ struct isp_operations {
 #define QLA_PCI_MSIX_CONTROL    0xa2
 
 struct scsi_qla_host;
-struct rsp_que;
 
 struct qla_msix_entry {
     int have_irq;
@@ -2140,7 +2141,6 @@ struct qla_statistics {
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
 #define QLA_MQ_SIZE 32
-#define QLA_MAX_HOST_QUES 16
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
     ((ha->mqenable) ? \
@@ -2170,6 +2170,7 @@ struct rsp_que {
     struct qla_hw_data *hw;
     struct qla_msix_entry *msix;
     struct req_que *req;
+    srb_t *status_srb; /* status continuation entry */
 };
 
 /* Request queue data structure */
@@ -2246,7 +2247,8 @@ struct qla_hw_data {
     struct rsp_que **rsp_q_map;
     unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
     unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
-    uint16_t max_queues;
+    uint8_t max_req_queues;
+    uint8_t max_rsp_queues;
     struct qla_npiv_entry *npiv_info;
     uint16_t nvram_npiv_size;
 
@@ -2532,6 +2534,7 @@ struct qla_hw_data {
     uint16_t num_vsans;         /* number of vsan created */
     uint16_t max_npiv_vports;   /* 63 or 125 per topoloty */
     int      cur_vport_count;
+    uint16_t flex_port_count;
 
     struct qla_chip_state_84xx *cs84xx;
     struct qla_statistics qla_stats;
@@ -2591,8 +2594,6 @@ typedef struct scsi_qla_host {
 #define SWITCH_FOUND    BIT_0
 #define DFLG_NO_CABLE   BIT_1
 
-    srb_t *status_srb;    /* Status continuation entry. */
-
     /* ISP configuration data. */
     uint16_t loop_id;    /* Host adapter loop id */
 
@@ -2648,7 +2649,7 @@ typedef struct scsi_qla_host {
 #define VP_ERR_FAB_LOGOUT    4
 #define VP_ERR_ADAP_NORESOURCES    5
     struct qla_hw_data *hw;
-    int req_ques[QLA_MAX_HOST_QUES];
+    struct req_que *req;
 } scsi_qla_host_t;
 
 /*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 528913f6bed9..b12de0176246 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -67,6 +67,7 @@ extern int ql2xextended_error_logging;
 extern int ql2xqfullrampup;
 extern int ql2xiidmaenable;
 extern int ql2xmaxqueues;
+extern int ql2xmultique_tag;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -165,13 +166,13 @@ extern int
 qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
 
 extern int
-qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
+qla2x00_abort_command(srb_t *);
 
 extern int
-qla2x00_abort_target(struct fc_port *, unsigned int);
+qla2x00_abort_target(struct fc_port *, unsigned int, int);
 
 extern int
-qla2x00_lun_reset(struct fc_port *, unsigned int);
+qla2x00_lun_reset(struct fc_port *, unsigned int, int);
 
 extern int
 qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +237,11 @@ extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
     dma_addr_t);
 
-extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
-extern int qla24xx_abort_target(struct fc_port *, unsigned int);
-extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
+extern int qla24xx_abort_command(srb_t *);
+extern int
+qla24xx_abort_target(struct fc_port *, unsigned int, int);
+extern int
+qla24xx_lun_reset(struct fc_port *, unsigned int, int);
 
 extern int
 qla2x00_system_error(scsi_qla_host_t *);
@@ -295,8 +298,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
 extern irqreturn_t qla2300_intr_handler(int, void *);
 extern irqreturn_t qla24xx_intr_handler(int, void *);
 extern void qla2x00_process_response_queue(struct rsp_que *);
-extern void qla24xx_process_response_queue(struct rsp_que *);
-
+extern void
+qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
 extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
@@ -401,19 +404,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
 extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
-    uint16_t, uint8_t, uint8_t);
+    uint16_t, int, uint8_t);
 extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
-    uint16_t);
+    uint16_t, int);
 extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
 extern void qla2x00_init_response_q_entries(struct rsp_que *);
 extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
-extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
+extern int qla25xx_delete_queues(struct scsi_qla_host *);
 extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
+
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9a343ec67567..059909c9f29b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -786,7 +786,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
         sizeof(uint32_t);
     if (ha->mqenable)
         mq_size = sizeof(struct qla2xxx_mq_chain);
-
     /* Allocate memory for Fibre Channel Event Buffer. */
     if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
         goto try_eft;
@@ -850,8 +849,7 @@ cont_alloc:
     rsp_q_size = rsp->length * sizeof(response_t);
 
     dump_size = offsetof(struct qla2xxx_fw_dump, isp);
-    dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
-        eft_size;
+    dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
     ha->chain_offset = dump_size;
     dump_size += mq_size + fce_size;
 
@@ -1013,12 +1011,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
     uint16_t cnt;
     response_t *pkt;
 
+    rsp->ring_ptr = rsp->ring;
+    rsp->ring_index = 0;
+    rsp->status_srb = NULL;
     pkt = rsp->ring_ptr;
     for (cnt = 0; cnt < rsp->length; cnt++) {
         pkt->signature = RESPONSE_PROCESSED;
         pkt++;
     }
-
 }
 
 /**
@@ -1176,7 +1176,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
     if (ha->flags.msix_enabled) {
         msix = &ha->msix_entries[1];
         DEBUG2_17(printk(KERN_INFO
-            "Reistering vector 0x%x for base que\n", msix->entry));
+            "Registering vector 0x%x for base que\n", msix->entry));
         icb->msix = cpu_to_le16(msix->entry);
     }
     /* Use alternate PCI bus number */
@@ -1230,14 +1230,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
     spin_lock_irqsave(&ha->hardware_lock, flags);
 
     /* Clear outstanding commands array. */
-    for (que = 0; que < ha->max_queues; que++) {
+    for (que = 0; que < ha->max_req_queues; que++) {
         req = ha->req_q_map[que];
         if (!req)
             continue;
-        for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+        for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
             req->outstanding_cmds[cnt] = NULL;
 
-        req->current_outstanding_cmd = 0;
+        req->current_outstanding_cmd = 1;
 
         /* Initialize firmware. */
         req->ring_ptr = req->ring;
@@ -1245,13 +1245,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
         req->cnt = req->length;
     }
 
-    for (que = 0; que < ha->max_queues; que++) {
+    for (que = 0; que < ha->max_rsp_queues; que++) {
         rsp = ha->rsp_q_map[que];
         if (!rsp)
             continue;
-        rsp->ring_ptr = rsp->ring;
-        rsp->ring_index = 0;
-
         /* Initialize response queue entries */
         qla2x00_init_response_q_entries(rsp);
     }
@@ -3180,8 +3177,7 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
 {
     int rval = QLA_SUCCESS;
     uint32_t wait_time;
-    struct qla_hw_data *ha = vha->hw;
-    struct req_que *req = ha->req_q_map[vha->req_ques[0]];
+    struct req_que *req = vha->req;
     struct rsp_que *rsp = req->rsp;
 
     atomic_set(&vha->loop_state, LOOP_UPDATE);
@@ -3448,7 +3444,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
     int ret = -1;
     int i;
 
-    for (i = 1; i < ha->max_queues; i++) {
+    for (i = 1; i < ha->max_rsp_queues; i++) {
         rsp = ha->rsp_q_map[i];
         if (rsp) {
             rsp->options &= ~BIT_0;
@@ -3462,6 +3458,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
             "%s Rsp que:%d inited\n", __func__,
             rsp->id));
         }
+    }
+    for (i = 1; i < ha->max_req_queues; i++) {
         req = ha->req_q_map[i];
         if (req) {
             /* Clear outstanding commands array. */
@@ -4165,7 +4163,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
     uint16_t mb[MAILBOX_REGISTER_COUNT];
     struct qla_hw_data *ha = vha->hw;
     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-    struct req_que *req = ha->req_q_map[vha->req_ques[0]];
+    struct req_que *req = vha->req;
     struct rsp_que *rsp = req->rsp;
 
     if (!vha->vp_idx)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a8abbb95730d..94b69d86482d 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -453,6 +453,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
         mrk24->lun[2] = MSB(lun);
         host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
         mrk24->vp_index = vha->vp_idx;
+        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
     } else {
         SET_TARGET_ID(ha, mrk->target, loop_id);
         mrk->lun = cpu_to_le16(lun);
@@ -531,9 +532,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
         for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
             *dword_ptr++ = 0;
 
-        /* Set system defined field. */
-        pkt->sys_define = (uint8_t)req->ring_index;
-
         /* Set entry count. */
         pkt->entry_count = 1;
 
@@ -724,19 +722,14 @@ qla24xx_start_scsi(srb_t *sp)
     struct scsi_cmnd *cmd = sp->cmd;
     struct scsi_qla_host *vha = sp->fcport->vha;
     struct qla_hw_data *ha = vha->hw;
-    uint16_t que_id;
 
     /* Setup device pointers. */
     ret = 0;
-    que_id = vha->req_ques[0];
 
-    req = ha->req_q_map[que_id];
+    req = vha->req;
+    rsp = ha->rsp_q_map[0];
     sp->que = req;
 
-    if (req->rsp)
-        rsp = req->rsp;
-    else
-        rsp = ha->rsp_q_map[que_id];
     /* So we know we haven't pci_map'ed anything yet */
     tot_dsds = 0;
 
@@ -794,7 +787,7 @@ qla24xx_start_scsi(srb_t *sp)
     req->cnt -= req_cnt;
 
     cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-    cmd_pkt->handle = handle;
+    cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
 
     /* Zero out remaining portion of packet. */
     /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +816,8 @@ qla24xx_start_scsi(srb_t *sp)
 
     /* Set total data segment count. */
     cmd_pkt->entry_count = (uint8_t)req_cnt;
+    /* Specify response queue number where completion should happen */
+    cmd_pkt->entry_status = (uint8_t) rsp->id;
     wmb();
 
     /* Adjust ring index. */
@@ -842,7 +837,7 @@ qla24xx_start_scsi(srb_t *sp)
     /* Manage unprocessed RIO/ZIO commands in response queue. */
     if (vha->flags.process_response_queue &&
         rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-        qla24xx_process_response_queue(rsp);
+        qla24xx_process_response_queue(vha, rsp);
 
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     return QLA_SUCCESS;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d04981848e56..c8e906c702a1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
     struct req_que *, uint32_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
-static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
+static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
     sts_entry_t *);
-static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
     status = 0;
 
     spin_lock(&ha->hardware_lock);
-    vha = qla2x00_get_rsp_host(rsp);
+    vha = pci_get_drvdata(ha->pdev);
     for (iter = 50; iter--; ) {
         hccr = RD_REG_WORD(&reg->hccr);
         if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
     status = 0;
 
     spin_lock(&ha->hardware_lock);
-    vha = qla2x00_get_rsp_host(rsp);
+    vha = pci_get_drvdata(ha->pdev);
     for (iter = 50; iter--; ) {
         stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
         if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
             vha->host_no));
 
         if (IS_FWI2_CAPABLE(ha))
-            qla24xx_process_response_queue(rsp);
+            qla24xx_process_response_queue(vha, rsp);
         else
             qla2x00_process_response_queue(rsp);
         break;
@@ -766,7 +765,7 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
     struct qla_hw_data *ha = vha->hw;
     struct req_que *req = NULL;
 
-    req = ha->req_q_map[vha->req_ques[0]];
+    req = vha->req;
     if (!req)
         return;
     if (req->max_q_depth <= sdev->queue_depth)
@@ -858,8 +857,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
         qla2x00_ramp_up_queue_depth(vha, req, sp);
         qla2x00_sp_compl(ha, sp);
     } else {
-        DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
-            vha->host_no));
+        DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
+            " handle(%d)\n", vha->host_no, req->id, index));
         qla_printk(KERN_WARNING, ha,
             "Invalid ISP SCSI completion handle\n");
 
@@ -881,7 +880,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
     uint16_t handle_cnt;
     uint16_t cnt;
 
-    vha = qla2x00_get_rsp_host(rsp);
+    vha = pci_get_drvdata(ha->pdev);
 
     if (!vha->flags.online)
         return;
@@ -926,7 +925,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
             }
             break;
         case STATUS_CONT_TYPE:
-            qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
+            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
             break;
         default:
             /* Type Not Supported. */
@@ -945,7 +944,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
 }
 
 static inline void
-qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
+qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
+    struct rsp_que *rsp)
 {
     struct scsi_cmnd *cp = sp->cmd;
 
@@ -962,7 +962,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
     sp->request_sense_ptr += sense_len;
     sp->request_sense_length -= sense_len;
     if (sp->request_sense_length != 0)
-        sp->fcport->vha->status_srb = sp;
+        rsp->status_srb = sp;
 
     DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
         "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +992,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
     uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
     uint8_t *rsp_info, *sense_data;
     struct qla_hw_data *ha = vha->hw;
-    struct req_que *req = rsp->req;
+    uint32_t handle;
+    uint16_t que;
+    struct req_que *req;
 
     sts = (sts_entry_t *) pkt;
     sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1005,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
         comp_status = le16_to_cpu(sts->comp_status);
         scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
     }
-
+    handle = (uint32_t) LSW(sts->handle);
+    que = MSW(sts->handle);
+    req = ha->req_q_map[que];
     /* Fast path completion. */
     if (comp_status == CS_COMPLETE && scsi_status == 0) {
-        qla2x00_process_completed_request(vha, req, sts->handle);
+        qla2x00_process_completed_request(vha, req, handle);
 
         return;
     }
 
     /* Validate handle. */
-    if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
-        sp = req->outstanding_cmds[sts->handle];
-        req->outstanding_cmds[sts->handle] = NULL;
+    if (handle < MAX_OUTSTANDING_COMMANDS) {
+        sp = req->outstanding_cmds[handle];
+        req->outstanding_cmds[handle] = NULL;
     } else
         sp = NULL;
 
@@ -1030,7 +1034,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
     cp = sp->cmd;
     if (cp == NULL) {
         DEBUG2(printk("scsi(%ld): Command already returned back to OS "
-            "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
+            "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
         qla_printk(KERN_WARNING, ha,
             "Command is NULL: already returned to OS (sp=%p)\n", sp);
 
@@ -1133,7 +1137,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
         if (!(scsi_status & SS_SENSE_LEN_VALID))
             break;
 
-        qla2x00_handle_sense(sp, sense_data, sense_len);
+        qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
         break;
 
     case CS_DATA_UNDERRUN:
@@ -1192,7 +1196,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
             if (!(scsi_status & SS_SENSE_LEN_VALID))
                 break;
 
-            qla2x00_handle_sense(sp, sense_data, sense_len);
+            qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
         } else {
             /*
              * If RISC reports underrun and target does not report
@@ -1334,7 +1338,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
     }
 
     /* Place command on done queue. */
-    if (vha->status_srb == NULL)
+    if (rsp->status_srb == NULL)
         qla2x00_sp_compl(ha, sp);
 }
 
@@ -1346,11 +1350,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
  * Extended sense data.
  */
 static void
-qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
+qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 {
     uint8_t sense_sz = 0;
-    struct qla_hw_data *ha = vha->hw;
-    srb_t *sp = vha->status_srb;
+    struct qla_hw_data *ha = rsp->hw;
+    srb_t *sp = rsp->status_srb;
     struct scsi_cmnd *cp;
 
     if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1366,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
             "cmd is NULL: already returned to OS (sp=%p)\n",
             sp);
 
-        vha->status_srb = NULL;
+        rsp->status_srb = NULL;
         return;
     }
 
@@ -1383,7 +1387,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
 
     /* Place command on done queue. */
     if (sp->request_sense_length == 0) {
-        vha->status_srb = NULL;
+        rsp->status_srb = NULL;
         qla2x00_sp_compl(ha, sp);
     }
 }
@@ -1399,7 +1403,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
     srb_t *sp;
     struct qla_hw_data *ha = vha->hw;
-    struct req_que *req = rsp->req;
+    uint32_t handle = LSW(pkt->handle);
+    uint16_t que = MSW(pkt->handle);
+    struct req_que *req = ha->req_q_map[que];
 #if defined(QL_DEBUG_LEVEL_2)
     if (pkt->entry_status & RF_INV_E_ORDER)
         qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1423,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 #endif
 
     /* Validate handle. */
-    if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
-        sp = req->outstanding_cmds[pkt->handle];
+    if (handle < MAX_OUTSTANDING_COMMANDS)
+        sp = req->outstanding_cmds[handle];
     else
         sp = NULL;
 
     if (sp) {
         /* Free outstanding command slot. */
-        req->outstanding_cmds[pkt->handle] = NULL;
+        req->outstanding_cmds[handle] = NULL;
 
         /* Bad payload or header */
         if (pkt->entry_status &
@@ -1486,13 +1492,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
  */
-void
-qla24xx_process_response_queue(struct rsp_que *rsp)
+void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+    struct rsp_que *rsp)
 {
     struct sts_entry_24xx *pkt;
-    struct scsi_qla_host *vha;
-
-    vha = qla2x00_get_rsp_host(rsp);
 
     if (!vha->flags.online)
         return;
@@ -1523,7 +1526,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
             qla2x00_status_entry(vha, rsp, pkt);
             break;
         case STATUS_CONT_TYPE:
-            qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
+            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
             break;
         case VP_RPT_ID_IOCB_TYPE:
             qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1629,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
     status = 0;
 
     spin_lock(&ha->hardware_lock);
-    vha = qla2x00_get_rsp_host(rsp);
+    vha = pci_get_drvdata(ha->pdev);
     for (iter = 50; iter--; ) {
         stat = RD_REG_DWORD(&reg->host_status);
         if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1667,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
             break;
         case 0x13:
         case 0x14:
-            qla24xx_process_response_queue(rsp);
+            qla24xx_process_response_queue(vha, rsp);
             break;
         default:
             DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1695,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
     struct qla_hw_data *ha;
     struct rsp_que *rsp;
     struct device_reg_24xx __iomem *reg;
+    struct scsi_qla_host *vha;
 
     rsp = (struct rsp_que *) dev_id;
     if (!rsp) {
@@ -1704,7 +1708,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
     spin_lock_irq(&ha->hardware_lock);
 
-    qla24xx_process_response_queue(rsp);
+    vha = qla25xx_get_host(rsp);
+    qla24xx_process_response_queue(vha, rsp);
     WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 
     spin_unlock_irq(&ha->hardware_lock);
@@ -1713,31 +1718,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 }
 
 static irqreturn_t
-qla25xx_msix_rsp_q(int irq, void *dev_id)
-{
-    struct qla_hw_data *ha;
-    struct rsp_que *rsp;
-    struct device_reg_24xx __iomem *reg;
-
-    rsp = (struct rsp_que *) dev_id;
-    if (!rsp) {
-        printk(KERN_INFO
-            "%s(): NULL response queue pointer\n", __func__);
-        return IRQ_NONE;
-    }
-    ha = rsp->hw;
-    reg = &ha->iobase->isp24;
-
-    spin_lock_irq(&ha->hardware_lock);
-
-    qla24xx_process_response_queue(rsp);
-
-    spin_unlock_irq(&ha->hardware_lock);
-
-    return IRQ_HANDLED;
-}
-
-static irqreturn_t
 qla24xx_msix_default(int irq, void *dev_id)
 {
     scsi_qla_host_t *vha;
@@ -1760,7 +1740,7 @@ qla24xx_msix_default(int irq, void *dev_id)
     status = 0;
 
     spin_lock_irq(&ha->hardware_lock);
-    vha = qla2x00_get_rsp_host(rsp);
+    vha = pci_get_drvdata(ha->pdev);
     do {
         stat = RD_REG_DWORD(&reg->host_status);
         if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1778,7 @@ qla24xx_msix_default(int irq, void *dev_id)
             break;
         case 0x13:
         case 0x14:
-            qla24xx_process_response_queue(rsp);
+            qla24xx_process_response_queue(vha, rsp);
            break;
         default:
             DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1802,13 @@ qla24xx_msix_default(int irq, void *dev_id)
 /* Interrupt handling helpers. */
 
 struct qla_init_msix_entry {
-    uint16_t entry;
-    uint16_t index;
     const char *name;
     irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry base_queue = {
-    .entry = 0,
-    .index = 0,
-    .name = "qla2xxx (default)",
-    .handler = qla24xx_msix_default,
-};
-
-static struct qla_init_msix_entry base_rsp_queue = {
-    .entry = 1,
-    .index = 1,
-    .name = "qla2xxx (rsp_q)",
-    .handler = qla24xx_msix_rsp_q,
-};
-
-static struct qla_init_msix_entry multi_rsp_queue = {
-    .entry = 1,
-    .index = 1,
-    .name = "qla2xxx (multi_q)",
-    .handler = qla25xx_msix_rsp_q,
+static struct qla_init_msix_entry msix_entries[2] = {
+    { "qla2xxx (default)", qla24xx_msix_default },
+    { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
 };
 
 static void
@@ -1873,7 +1835,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
     int i, ret;
     struct msix_entry *entries;
     struct qla_msix_entry *qentry;
-    struct qla_init_msix_entry *msix_queue;
 
     entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
             GFP_KERNEL);
@@ -1900,7 +1861,7 @@ msix_failed:
             ha->msix_count, ret);
             goto msix_out;
         }
-        ha->max_queues = ha->msix_count - 1;
+        ha->max_rsp_queues = ha->msix_count - 1;
     }
     ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
             ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1879,27 @@ msix_failed:
         qentry->rsp = NULL;
     }
 
-    /* Enable MSI-X for AENs for queue 0 */
-    qentry = &ha->msix_entries[0];
-    ret = request_irq(qentry->vector, base_queue.handler, 0,
-        base_queue.name, rsp);
-    if (ret) {
-        qla_printk(KERN_WARNING, ha,
-            "MSI-X: Unable to register handler -- %x/%d.\n",
-            qentry->vector, ret);
-        qla24xx_disable_msix(ha);
-        goto msix_out;
+    /* Enable MSI-X vectors for the base queue */
+    for (i = 0; i < 2; i++) {
+        qentry = &ha->msix_entries[i];
+        ret = request_irq(qentry->vector, msix_entries[i].handler,
+            0, msix_entries[i].name, rsp);
+        if (ret) {
+            qla_printk(KERN_WARNING, ha,
+                "MSI-X: Unable to register handler -- %x/%d.\n",
+                qentry->vector, ret);
+            qla24xx_disable_msix(ha);
+            ha->mqenable = 0;
+            goto msix_out;
+        }
+        qentry->have_irq = 1;
+        qentry->rsp = rsp;
+        rsp->msix = qentry;
     }
-    qentry->have_irq = 1;
-    qentry->rsp = rsp;
 
     /* Enable MSI-X vector for response queue update for queue 0 */
-    if (ha->max_queues > 1 && ha->mqiobase) {
+    if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
         ha->mqenable = 1;
-        msix_queue = &multi_rsp_queue;
-        qla_printk(KERN_INFO, ha,
-            "MQ enabled, Number of Queue Resources: %d \n",
-            ha->max_queues);
-    } else {
-        ha->mqenable = 0;
-        msix_queue = &base_rsp_queue;
-    }
-
-    qentry = &ha->msix_entries[1];
-    ret = request_irq(qentry->vector, msix_queue->handler, 0,
-        msix_queue->name, rsp);
-    if (ret) {
-        qla_printk(KERN_WARNING, ha,
-            "MSI-X: Unable to register handler -- %x/%d.\n",
-            qentry->vector, ret);
-        qla24xx_disable_msix(ha);
-        ha->mqenable = 0;
-        goto msix_out;
-    }
-    qentry->have_irq = 1;
-    qentry->rsp = rsp;
 
 msix_out:
     kfree(entries);
@@ -2063,35 +2006,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
     }
 }
 
-static struct scsi_qla_host *
-qla2x00_get_rsp_host(struct rsp_que *rsp)
-{
-    srb_t *sp;
-    struct qla_hw_data *ha = rsp->hw;
-    struct scsi_qla_host *vha = NULL;
-    struct sts_entry_24xx *pkt;
-    struct req_que *req;
-
-    if (rsp->id) {
-        pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
-        req = rsp->req;
-        if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
-            sp = req->outstanding_cmds[pkt->handle];
-            if (sp)
-                vha = sp->fcport->vha;
-        }
-    }
-    if (!vha)
-        /* handle it in base queue */
-        vha = pci_get_drvdata(ha->pdev);
-
-    return vha;
-}
 
 int qla25xx_request_irq(struct rsp_que *rsp)
 {
     struct qla_hw_data *ha = rsp->hw;
-    struct qla_init_msix_entry *intr = &multi_rsp_queue;
+    struct qla_init_msix_entry *intr = &msix_entries[2];
     struct qla_msix_entry *msix = rsp->msix;
     int ret;
 
@@ -2106,3 +2025,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
     msix->rsp = rsp;
     return ret;
 }
+
+struct scsi_qla_host *
+qla25xx_get_host(struct rsp_que *rsp)
+{
+    srb_t *sp;
+    struct qla_hw_data *ha = rsp->hw;
+    struct scsi_qla_host *vha = NULL;
+    struct sts_entry_24xx *pkt;
+    struct req_que *req;
+    uint16_t que;
+    uint32_t handle;
+
+    pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+    que = MSW(pkt->handle);
+    handle = (uint32_t) LSW(pkt->handle);
+    req = ha->req_q_map[que];
+    if (handle < MAX_OUTSTANDING_COMMANDS) {
+        sp = req->outstanding_cmds[handle];
+        if (sp)
+            return sp->fcport->vha;
+        else
+            goto base_que;
+    }
+base_que:
+    vha = pci_get_drvdata(ha->pdev);
+    return vha;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 4f7e94c4daaa..bfdc89f8569b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -748,20 +748,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
748 * Kernel context. 748 * Kernel context.
749 */ 749 */
750int 750int
751qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 751qla2x00_abort_command(srb_t *sp)
752{ 752{
753 unsigned long flags = 0; 753 unsigned long flags = 0;
754 fc_port_t *fcport;
755 int rval; 754 int rval;
756 uint32_t handle = 0; 755 uint32_t handle = 0;
757 mbx_cmd_t mc; 756 mbx_cmd_t mc;
758 mbx_cmd_t *mcp = &mc; 757 mbx_cmd_t *mcp = &mc;
758 fc_port_t *fcport = sp->fcport;
759 scsi_qla_host_t *vha = fcport->vha;
759 struct qla_hw_data *ha = vha->hw; 760 struct qla_hw_data *ha = vha->hw;
761 struct req_que *req = vha->req;
760 762
761 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); 763 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
762 764
763 fcport = sp->fcport;
764
765 spin_lock_irqsave(&ha->hardware_lock, flags); 765 spin_lock_irqsave(&ha->hardware_lock, flags);
766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 766 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
767 if (req->outstanding_cmds[handle] == sp) 767 if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +800,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
800} 800}
801 801
802int 802int
803qla2x00_abort_target(struct fc_port *fcport, unsigned int l) 803qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
804{ 804{
805 int rval, rval2; 805 int rval, rval2;
806 mbx_cmd_t mc; 806 mbx_cmd_t mc;
@@ -813,8 +813,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
813 813
814 l = l; 814 l = l;
815 vha = fcport->vha; 815 vha = fcport->vha;
816 req = vha->hw->req_q_map[0]; 816 req = vha->hw->req_q_map[tag];
817 rsp = vha->hw->rsp_q_map[0]; 817 rsp = vha->hw->rsp_q_map[tag];
818 mcp->mb[0] = MBC_ABORT_TARGET; 818 mcp->mb[0] = MBC_ABORT_TARGET;
819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 819 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
820 if (HAS_EXTENDED_IDS(vha->hw)) { 820 if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +850,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
850} 850}
851 851
852int 852int
853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) 853qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
854{ 854{
855 int rval, rval2; 855 int rval, rval2;
856 mbx_cmd_t mc; 856 mbx_cmd_t mc;
@@ -862,8 +862,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); 862 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
863 863
864 vha = fcport->vha; 864 vha = fcport->vha;
865 req = vha->hw->req_q_map[0]; 865 req = vha->hw->req_q_map[tag];
866 rsp = vha->hw->rsp_q_map[0]; 866 rsp = vha->hw->rsp_q_map[tag];
867 mcp->mb[0] = MBC_LUN_RESET; 867 mcp->mb[0] = MBC_LUN_RESET;
868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 868 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
869 if (HAS_EXTENDED_IDS(vha->hw)) 869 if (HAS_EXTENDED_IDS(vha->hw))
@@ -1492,9 +1492,14 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1492 dma_addr_t lg_dma; 1492 dma_addr_t lg_dma;
1493 uint32_t iop[2]; 1493 uint32_t iop[2];
1494 struct qla_hw_data *ha = vha->hw; 1494 struct qla_hw_data *ha = vha->hw;
1495 struct req_que *req;
1496 struct rsp_que *rsp;
1495 1497
1496 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1498 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1497 1499
1500 req = vha->req;
1501 rsp = req->rsp;
1502
1498 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1503 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1499 if (lg == NULL) { 1504 if (lg == NULL) {
1500 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1505 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1505,6 +1510,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1505 1510
1506 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1511 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1507 lg->entry_count = 1; 1512 lg->entry_count = 1;
1513 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1508 lg->nport_handle = cpu_to_le16(loop_id); 1514 lg->nport_handle = cpu_to_le16(loop_id);
1509 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); 1515 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1510 if (opt & BIT_0) 1516 if (opt & BIT_0)
@@ -1753,6 +1759,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1753 struct logio_entry_24xx *lg; 1759 struct logio_entry_24xx *lg;
1754 dma_addr_t lg_dma; 1760 dma_addr_t lg_dma;
1755 struct qla_hw_data *ha = vha->hw; 1761 struct qla_hw_data *ha = vha->hw;
1762 struct req_que *req;
1763 struct rsp_que *rsp;
1756 1764
1757 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 1765 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1758 1766
@@ -1764,8 +1772,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1764 } 1772 }
1765 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1773 memset(lg, 0, sizeof(struct logio_entry_24xx));
1766 1774
1775 if (ql2xmaxqueues > 1)
1776 req = ha->req_q_map[0];
1777 else
1778 req = vha->req;
1779 rsp = req->rsp;
1767 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1780 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1768 lg->entry_count = 1; 1781 lg->entry_count = 1;
1782 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1769 lg->nport_handle = cpu_to_le16(loop_id); 1783 lg->nport_handle = cpu_to_le16(loop_id);
1770 lg->control_flags = 1784 lg->control_flags =
1771 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1785 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -2204,21 +2218,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2204} 2218}
2205 2219
2206int 2220int
2207qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) 2221qla24xx_abort_command(srb_t *sp)
2208{ 2222{
2209 int rval; 2223 int rval;
2210 fc_port_t *fcport;
2211 unsigned long flags = 0; 2224 unsigned long flags = 0;
2212 2225
2213 struct abort_entry_24xx *abt; 2226 struct abort_entry_24xx *abt;
2214 dma_addr_t abt_dma; 2227 dma_addr_t abt_dma;
2215 uint32_t handle; 2228 uint32_t handle;
2229 fc_port_t *fcport = sp->fcport;
2230 struct scsi_qla_host *vha = fcport->vha;
2216 struct qla_hw_data *ha = vha->hw; 2231 struct qla_hw_data *ha = vha->hw;
2232 struct req_que *req = sp->que;
2217 2233
2218 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); 2234 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2219 2235
2220 fcport = sp->fcport;
2221
2222 spin_lock_irqsave(&ha->hardware_lock, flags); 2236 spin_lock_irqsave(&ha->hardware_lock, flags);
2223 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2237 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2224 if (req->outstanding_cmds[handle] == sp) 2238 if (req->outstanding_cmds[handle] == sp)
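qla24xx_abort_command() now takes only the srb and derives the fcport, vha and request queue from it (sp->fcport, fcport->vha, sp->que), then scans that queue's outstanding_cmds[] table for the command's handle under the hardware lock. A stand-alone sketch of the table scan follows; the table size and structures are simplified stand-ins.

        /* Sketch: find the outstanding-command slot (handle) that holds a
         * given srb.  Slot 0 is reserved, so handles start at 1 and 0 means
         * "not found".  MAX_OUTSTANDING_COMMANDS and the types are stand-ins. */
        #include <stdint.h>
        #include <stdio.h>

        #define MAX_OUTSTANDING_COMMANDS 32

        struct srb { int dummy; };

        struct req_que {
                struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
        };

        static uint32_t find_handle(const struct req_que *req, const struct srb *sp)
        {
                uint32_t handle;

                for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++)
                        if (req->outstanding_cmds[handle] == sp)
                                return handle;
                return 0;
        }

        int main(void)
        {
                struct req_que req = { { 0 } };
                struct srb cmd;

                req.outstanding_cmds[7] = &cmd;
                printf("handle=%u\n", find_handle(&req, &cmd));
                return 0;
        }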
@@ -2240,6 +2254,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2240 2254
2241 abt->entry_type = ABORT_IOCB_TYPE; 2255 abt->entry_type = ABORT_IOCB_TYPE;
2242 abt->entry_count = 1; 2256 abt->entry_count = 1;
2257 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2243 abt->nport_handle = cpu_to_le16(fcport->loop_id); 2258 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2244 abt->handle_to_abort = handle; 2259 abt->handle_to_abort = handle;
2245 abt->port_id[0] = fcport->d_id.b.al_pa; 2260 abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2281,7 +2296,7 @@ struct tsk_mgmt_cmd {
2281 2296
2282static int 2297static int
2283__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, 2298__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2284 unsigned int l) 2299 unsigned int l, int tag)
2285{ 2300{
2286 int rval, rval2; 2301 int rval, rval2;
2287 struct tsk_mgmt_cmd *tsk; 2302 struct tsk_mgmt_cmd *tsk;
@@ -2295,8 +2310,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2295 2310
2296 vha = fcport->vha; 2311 vha = fcport->vha;
2297 ha = vha->hw; 2312 ha = vha->hw;
2298 req = ha->req_q_map[0]; 2313 req = vha->req;
2299 rsp = ha->rsp_q_map[0]; 2314 rsp = req->rsp;
2300 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2315 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2301 if (tsk == NULL) { 2316 if (tsk == NULL) {
2302 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2317 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2307,6 +2322,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2307 2322
2308 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2323 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2309 tsk->p.tsk.entry_count = 1; 2324 tsk->p.tsk.entry_count = 1;
2325 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2310 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2326 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2311 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2327 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2312 tsk->p.tsk.control_flags = cpu_to_le32(type); 2328 tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2353,15 +2369,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2353} 2369}
2354 2370
2355int 2371int
2356qla24xx_abort_target(struct fc_port *fcport, unsigned int l) 2372qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2357{ 2373{
2358 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); 2374 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2359} 2375}
2360 2376
2361int 2377int
2362qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) 2378qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2363{ 2379{
2364 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); 2380 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2365} 2381}
2366 2382
2367int 2383int
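Task-management IOCBs no longer hard-code queue 0; __qla24xx_issue_tmf() uses the vport's own request queue (vha->req) and the response queue paired with it (req->rsp). The sketch below models that pairing with stand-in structures, showing how a vport reaches both queues from a single pointer.

        /* Sketch: a request queue carries a pointer to its paired response
         * queue, so code holding vha->req can reach both.  The structures are
         * stand-ins for the driver's req_que/rsp_que. */
        #include <stdio.h>

        struct rsp_que;

        struct req_que {
                int id;
                struct rsp_que *rsp;
        };

        struct rsp_que {
                int id;
                struct req_que *req;
        };

        int main(void)
        {
                struct req_que req = { .id = 2 };
                struct rsp_que rsp = { .id = 2 };
                struct req_que *vha_req;

                req.rsp = &rsp;         /* pair the queues both ways */
                rsp.req = &req;

                /* A vport stores only its request queue and derives the
                 * response queue from it, as the TMF path does. */
                vha_req = &req;
                printf("tmf on req %d, completions on rsp %d\n",
                    vha_req->id, vha_req->rsp->id);
                return 0;
        }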
@@ -3150,6 +3166,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3150 WRT_REG_DWORD(&reg->req_q_in, 0); 3166 WRT_REG_DWORD(&reg->req_q_in, 0);
3151 WRT_REG_DWORD(&reg->req_q_out, 0); 3167 WRT_REG_DWORD(&reg->req_q_out, 0);
3152 } 3168 }
3169 req->req_q_in = &reg->req_q_in;
3170 req->req_q_out = &reg->req_q_out;
3153 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3171 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3154 3172
3155 rval = qla2x00_mailbox_command(vha, mcp); 3173 rval = qla2x00_mailbox_command(vha, mcp);
@@ -3176,7 +3194,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3176 mcp->mb[6] = MSW(MSD(rsp->dma)); 3194 mcp->mb[6] = MSW(MSD(rsp->dma));
3177 mcp->mb[7] = LSW(MSD(rsp->dma)); 3195 mcp->mb[7] = LSW(MSD(rsp->dma));
3178 mcp->mb[5] = rsp->length; 3196 mcp->mb[5] = rsp->length;
3179 mcp->mb[11] = rsp->vp_idx;
3180 mcp->mb[14] = rsp->msix->entry; 3197 mcp->mb[14] = rsp->msix->entry;
3181 mcp->mb[13] = rsp->rid; 3198 mcp->mb[13] = rsp->rid;
3182 3199
@@ -3188,7 +3205,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3188 mcp->mb[8] = 0; 3205 mcp->mb[8] = 0;
3189 /* que out ptr index */ 3206 /* que out ptr index */
3190 mcp->mb[9] = 0; 3207 mcp->mb[9] = 0;
3191 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 3208 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3192 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3209 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3193 mcp->in_mb = MBX_0; 3210 mcp->in_mb = MBX_0;
3194 mcp->flags = MBX_DMA_OUT; 3211 mcp->flags = MBX_DMA_OUT;
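Because the response-queue init mailbox no longer carries a VP index, mb[11] is dropped and the out_mb mask loses MBX_10 through MBX_12, so those registers are no longer loaded. out_mb is simply a bitmask telling the mailbox layer which mb[] registers to write before firing the command; a small stand-alone model of that pattern is sketched below with assumed macro values.

        /* Sketch: drive which mailbox registers get written from a bitmask,
         * the way mcp->out_mb is used.  The MBX_n values, register count and
         * contents are assumptions for illustration. */
        #include <stdint.h>
        #include <stdio.h>

        #define MBX_0   (1u << 0)
        #define MBX_1   (1u << 1)
        #define MBX_9   (1u << 9)
        #define MBX_13  (1u << 13)
        #define MBX_14  (1u << 14)

        static void load_mailbox(const uint16_t mb[16], uint32_t out_mb)
        {
                int i;

                for (i = 0; i < 16; i++)
                        if (out_mb & (1u << i))
                                printf("write mb[%d] = 0x%04x\n", i, mb[i]);
        }

        int main(void)
        {
                uint16_t mb[16] = { 0 };
                uint32_t out_mb = MBX_14 | MBX_13 | MBX_9 | MBX_1 | MBX_0;

                mb[0] = 0x001f;         /* command opcode (made-up value) */
                mb[14] = 2;             /* e.g. the queue's MSI-X entry */
                load_mailbox(mb, out_mb);
                return 0;
        }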
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 51716c7e3008..9c08479c3e1b 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
398 398
399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 399 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
400 400
401 memset(vha->req_ques, 0, sizeof(vha->req_ques)); 401 vha->req = base_vha->req;
402 vha->req_ques[0] = ha->req_q_map[0]->id; 402 host->can_queue = base_vha->req->length + 128;
403 host->can_queue = ha->req_q_map[0]->length + 128;
404 host->this_id = 255; 403 host->this_id = 255;
405 host->cmd_per_lun = 3; 404 host->cmd_per_lun = 3;
406 host->max_cmd_len = MAX_CMDSZ; 405 host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
515 514
516/* Delete all queues for a given vhost */ 515/* Delete all queues for a given vhost */
517int 516int
518qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) 517qla25xx_delete_queues(struct scsi_qla_host *vha)
519{ 518{
520 int cnt, ret = 0; 519 int cnt, ret = 0;
521 struct req_que *req = NULL; 520 struct req_que *req = NULL;
522 struct rsp_que *rsp = NULL; 521 struct rsp_que *rsp = NULL;
523 struct qla_hw_data *ha = vha->hw; 522 struct qla_hw_data *ha = vha->hw;
524 523
525 if (que_no) { 524 /* Delete request queues */
526 /* Delete request queue */ 525 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
527 req = ha->req_q_map[que_no]; 526 req = ha->req_q_map[cnt];
528 if (req) { 527 if (req) {
529 rsp = req->rsp;
530 ret = qla25xx_delete_req_que(vha, req); 528 ret = qla25xx_delete_req_que(vha, req);
531 if (ret != QLA_SUCCESS) { 529 if (ret != QLA_SUCCESS) {
532 qla_printk(KERN_WARNING, ha, 530 qla_printk(KERN_WARNING, ha,
533 "Couldn't delete req que %d\n", req->id); 531 "Couldn't delete req que %d\n",
532 req->id);
534 return ret; 533 return ret;
535 } 534 }
536 /* Delete associated response queue */
537 if (rsp) {
538 ret = qla25xx_delete_rsp_que(vha, rsp);
539 if (ret != QLA_SUCCESS) {
540 qla_printk(KERN_WARNING, ha,
541 "Couldn't delete rsp que %d\n",
542 rsp->id);
543 return ret;
544 }
545 }
546 } 535 }
547 } else { /* delete all queues of this host */ 536 }
548 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { 537
549 /* Delete request queues */ 538 /* Delete response queues */
550 req = ha->req_q_map[vha->req_ques[cnt]]; 539 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
551 if (req && req->id) { 540 rsp = ha->rsp_q_map[cnt];
552 rsp = req->rsp; 541 if (rsp) {
553 ret = qla25xx_delete_req_que(vha, req); 542 ret = qla25xx_delete_rsp_que(vha, rsp);
554 if (ret != QLA_SUCCESS) { 543 if (ret != QLA_SUCCESS) {
555 qla_printk(KERN_WARNING, ha, 544 qla_printk(KERN_WARNING, ha,
556 "Couldn't delete req que %d\n", 545 "Couldn't delete rsp que %d\n",
557 vha->req_ques[cnt]); 546 rsp->id);
558 return ret; 547 return ret;
559 }
560 vha->req_ques[cnt] = ha->req_q_map[0]->id;
561 /* Delete associated response queue */
562 if (rsp && rsp->id) {
563 ret = qla25xx_delete_rsp_que(vha, rsp);
564 if (ret != QLA_SUCCESS) {
565 qla_printk(KERN_WARNING, ha,
566 "Couldn't delete rsp que %d\n",
567 rsp->id);
568 return ret;
569 }
570 }
571 } 548 }
572 } 549 }
573 } 550 }
574 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
575 vha->vp_idx);
576 return ret; 551 return ret;
577} 552}
578 553
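qla25xx_delete_queues() no longer deletes one queue per vport via vha->req_ques[]; it sweeps the whole ha->req_q_map[] and ha->rsp_q_map[] arrays from index 1 upward (index 0 is the base queue and is never torn down here) and deletes whatever it finds. A stand-alone sketch of that sweep is below, with stand-in types and a stub delete call in place of the driver's mailbox commands.

        /* Sketch: tear down every non-base queue by sweeping the qid maps.
         * Index 0 is the base queue and is skipped.  Types, sizes and the
         * stub delete function are stand-ins. */
        #include <stdio.h>
        #include <stdlib.h>

        #define MAX_QUEUES 8

        struct que { int id; };

        static int delete_que(struct que *q)
        {
                printf("deleting que %d\n", q->id);
                free(q);
                return 0;                       /* 0 == success in this sketch */
        }

        static int delete_all_queues(struct que *req_q_map[MAX_QUEUES],
            struct que *rsp_q_map[MAX_QUEUES])
        {
                int cnt, ret;

                for (cnt = 1; cnt < MAX_QUEUES; cnt++)
                        if (req_q_map[cnt]) {
                                ret = delete_que(req_q_map[cnt]);
                                if (ret)
                                        return ret;
                                req_q_map[cnt] = NULL;
                        }
                for (cnt = 1; cnt < MAX_QUEUES; cnt++)
                        if (rsp_q_map[cnt]) {
                                ret = delete_que(rsp_q_map[cnt]);
                                if (ret)
                                        return ret;
                                rsp_q_map[cnt] = NULL;
                        }
                return 0;
        }

        int main(void)
        {
                struct que *req[MAX_QUEUES] = { 0 }, *rsp[MAX_QUEUES] = { 0 };

                req[1] = malloc(sizeof(*req[1])); req[1]->id = 1;
                rsp[1] = malloc(sizeof(*rsp[1])); rsp[1]->id = 1;
                return delete_all_queues(req, rsp);
        }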
579int 554int
580qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 555qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
581 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) 556 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
582{ 557{
583 int ret = 0; 558 int ret = 0;
584 struct req_que *req = NULL; 559 struct req_que *req = NULL;
585 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 560 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
586 uint16_t que_id = 0; 561 uint16_t que_id = 0;
587 device_reg_t __iomem *reg; 562 device_reg_t __iomem *reg;
563 uint32_t cnt;
588 564
589 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 565 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
590 if (req == NULL) { 566 if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
604 } 580 }
605 581
606 mutex_lock(&ha->vport_lock); 582 mutex_lock(&ha->vport_lock);
607 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); 583 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
608 if (que_id >= ha->max_queues) { 584 if (que_id >= ha->max_req_queues) {
609 mutex_unlock(&ha->vport_lock); 585 mutex_unlock(&ha->vport_lock);
610 qla_printk(KERN_INFO, ha, "No resources to create " 586 qla_printk(KERN_INFO, ha, "No resources to create "
611 "additional request queue\n"); 587 "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
617 req->vp_idx = vp_idx; 593 req->vp_idx = vp_idx;
618 req->qos = qos; 594 req->qos = qos;
619 595
620 if (ha->rsp_q_map[rsp_que]) { 596 if (rsp_que < 0)
597 req->rsp = NULL;
598 else
621 req->rsp = ha->rsp_q_map[rsp_que]; 599 req->rsp = ha->rsp_q_map[rsp_que];
622 req->rsp->req = req;
623 }
624 /* Use alternate PCI bus number */ 600 /* Use alternate PCI bus number */
625 if (MSB(req->rid)) 601 if (MSB(req->rid))
626 options |= BIT_4; 602 options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
628 if (LSB(req->rid)) 604 if (LSB(req->rid))
629 options |= BIT_5; 605 options |= BIT_5;
630 req->options = options; 606 req->options = options;
607
608 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
609 req->outstanding_cmds[cnt] = NULL;
610 req->current_outstanding_cmd = 1;
611
631 req->ring_ptr = req->ring; 612 req->ring_ptr = req->ring;
632 req->ring_index = 0; 613 req->ring_index = 0;
633 req->cnt = req->length; 614 req->cnt = req->length;
634 req->id = que_id; 615 req->id = que_id;
635 reg = ISP_QUE_REG(ha, que_id); 616 reg = ISP_QUE_REG(ha, que_id);
636 req->req_q_in = &reg->isp25mq.req_q_in;
637 req->req_q_out = &reg->isp25mq.req_q_out;
638 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 617 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
639 mutex_unlock(&ha->vport_lock); 618 mutex_unlock(&ha->vport_lock);
640 619
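A freshly created request queue is now fully initialized before use: its outstanding_cmds[] slots are cleared, current_outstanding_cmd starts at 1 (slot 0 stays reserved), the ring index and free-entry count are reset, and the queue caches pointers to its own isp25mq in/out registers so the I/O path does not have to recompute them. A simplified stand-alone initializer is sketched below; DMA rings and MMIO are modelled with plain memory.

        /* Sketch: initialize a new request queue's bookkeeping.  The structure
         * is a stand-in, and the register pointers point at ordinary variables
         * instead of mapped hardware registers. */
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define MAX_OUTSTANDING_COMMANDS 32
        #define REQUEST_ENTRY_CNT 128

        struct req_que {
                void *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
                uint32_t current_outstanding_cmd;
                uint32_t ring_index;
                uint32_t cnt;                   /* free slots left on the ring */
                uint16_t id;
                volatile uint32_t *req_q_in;
                volatile uint32_t *req_q_out;
        };

        static void init_req_que(struct req_que *req, uint16_t que_id,
            volatile uint32_t *in_reg, volatile uint32_t *out_reg)
        {
                memset(req->outstanding_cmds, 0, sizeof(req->outstanding_cmds));
                req->current_outstanding_cmd = 1;   /* slot 0 never handed out */
                req->ring_index = 0;
                req->cnt = REQUEST_ENTRY_CNT;
                req->id = que_id;
                req->req_q_in = in_reg;             /* cached register pointers */
                req->req_q_out = out_reg;
        }

        int main(void)
        {
                static uint32_t fake_in, fake_out;  /* stand-ins for MMIO */
                struct req_que req;

                init_req_que(&req, 1, &fake_in, &fake_out);
                printf("que %u ready, %u ring entries free\n", req.id, req.cnt);
                return 0;
        }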
@@ -657,7 +636,7 @@ que_failed:
657/* create response queue */ 636/* create response queue */
658int 637int
659qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 638qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
660 uint8_t vp_idx, uint16_t rid) 639 uint8_t vp_idx, uint16_t rid, int req)
661{ 640{
662 int ret = 0; 641 int ret = 0;
663 struct rsp_que *rsp = NULL; 642 struct rsp_que *rsp = NULL;
@@ -672,7 +651,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
672 goto que_failed; 651 goto que_failed;
673 } 652 }
674 653
675 rsp->length = RESPONSE_ENTRY_CNT_2300; 654 rsp->length = RESPONSE_ENTRY_CNT_MQ;
676 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 655 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
677 (rsp->length + 1) * sizeof(response_t), 656 (rsp->length + 1) * sizeof(response_t),
678 &rsp->dma, GFP_KERNEL); 657 &rsp->dma, GFP_KERNEL);
@@ -683,8 +662,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
683 } 662 }
684 663
685 mutex_lock(&ha->vport_lock); 664 mutex_lock(&ha->vport_lock);
686 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); 665 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
687 if (que_id >= ha->max_queues) { 666 if (que_id >= ha->max_rsp_queues) {
688 mutex_unlock(&ha->vport_lock); 667 mutex_unlock(&ha->vport_lock);
689 qla_printk(KERN_INFO, ha, "No resources to create " 668 qla_printk(KERN_INFO, ha, "No resources to create "
690 "additional response queue\n"); 669 "additional response queue\n");
@@ -708,8 +687,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
708 if (LSB(rsp->rid)) 687 if (LSB(rsp->rid))
709 options |= BIT_5; 688 options |= BIT_5;
710 rsp->options = options; 689 rsp->options = options;
711 rsp->ring_ptr = rsp->ring;
712 rsp->ring_index = 0;
713 rsp->id = que_id; 690 rsp->id = que_id;
714 reg = ISP_QUE_REG(ha, que_id); 691 reg = ISP_QUE_REG(ha, que_id);
715 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; 692 rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +705,12 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
728 mutex_unlock(&ha->vport_lock); 705 mutex_unlock(&ha->vport_lock);
729 goto que_failed; 706 goto que_failed;
730 } 707 }
708 if (req >= 0)
709 rsp->req = ha->req_q_map[req];
710 else
711 rsp->req = NULL;
731 712
732 qla2x00_init_response_q_entries(rsp); 713 qla2x00_init_response_q_entries(rsp);
733
734 return rsp->id; 714 return rsp->id;
735 715
736que_failed: 716que_failed:
@@ -744,14 +724,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
744 uint16_t options = 0; 724 uint16_t options = 0;
745 uint8_t ret = 0; 725 uint8_t ret = 0;
746 struct qla_hw_data *ha = vha->hw; 726 struct qla_hw_data *ha = vha->hw;
727 struct rsp_que *rsp;
747 728
748 options |= BIT_1; 729 options |= BIT_1;
749 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); 730 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
750 if (!ret) { 731 if (!ret) {
751 qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); 732 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
752 return ret; 733 return ret;
753 } else 734 } else
754 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); 735 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
736 rsp = ha->rsp_q_map[ret];
755 737
756 options = 0; 738 options = 0;
757 if (qos & BIT_7) 739 if (qos & BIT_7)
@@ -759,10 +741,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
759 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, 741 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
760 qos & ~BIT_7); 742 qos & ~BIT_7);
761 if (ret) { 743 if (ret) {
762 vha->req_ques[0] = ret; 744 vha->req = ha->req_q_map[ret];
763 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); 745 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
764 } else 746 } else
765 qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); 747 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
748 rsp->req = ha->req_q_map[ret];
766 749
767 return ret; 750 return ret;
768} 751}
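For a vport, qla25xx_create_queues() now builds a queue pair in two steps: it first creates a response queue with no request queue attached yet (req passed as -1), then creates a request queue bound to that response queue while passing the QoS value (BIT_7 selecting an option bit, the low bits carrying the priority), and finally cross-links rsp->req and stores the new queue in vha->req. The sketch below models that ordering with stub create functions; the return convention (0 on failure, queue id on success) mirrors the driver's.

        /* Sketch: create a response queue, then a request queue bound to it,
         * and cross-link the pair.  The create_* stubs stand in for the
         * driver's mailbox-backed create routines. */
        #include <stdio.h>

        #define MAX_QUEUES 8

        struct pair { int rsp_id; int req_id; };

        static struct pair queues[MAX_QUEUES];
        static int next_id = 1;                 /* id 0 is the base queue */

        static int create_rsp_que(void)
        {
                if (next_id >= MAX_QUEUES)
                        return 0;               /* 0 == failure, like the driver */
                queues[next_id].rsp_id = next_id;
                queues[next_id].req_id = -1;    /* nothing attached yet */
                return next_id++;
        }

        static int create_req_que(int rsp_id, unsigned char qos)
        {
                if (next_id >= MAX_QUEUES)
                        return 0;
                queues[next_id].req_id = next_id;
                queues[rsp_id].req_id = next_id;    /* cross-link rsp -> req */
                printf("req que %d bound to rsp que %d, qos=%u\n",
                    next_id, rsp_id, qos & 0x7f);
                return next_id++;
        }

        int main(void)
        {
                int rsp, req;

                rsp = create_rsp_que();
                if (!rsp)
                        return 1;
                req = create_req_que(rsp, 5);   /* priority 5 for this vport */
                return req ? 0 : 1;
        }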
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 29234ba42b42..e2647e02dac9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -187,7 +187,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 187/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 188static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 189{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 191 GFP_KERNEL);
192 if (!ha->req_q_map) { 192 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 193 qla_printk(KERN_WARNING, ha,
@@ -195,7 +195,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 195 goto fail_req_map;
196 } 196 }
197 197
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 199 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 200 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 201 qla_printk(KERN_WARNING, ha,
@@ -213,16 +213,8 @@ fail_req_map:
213 return -ENOMEM; 213 return -ENOMEM;
214} 214}
215 215
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 216static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 217{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 218 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 219 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 220 (req->length + 1) * sizeof(request_t),
@@ -232,22 +224,36 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 224 req = NULL;
233} 225}
234 226
227static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
228{
229 if (rsp && rsp->ring)
230 dma_free_coherent(&ha->pdev->dev,
231 (rsp->length + 1) * sizeof(response_t),
232 rsp->ring, rsp->dma);
233
234 kfree(rsp);
235 rsp = NULL;
236}
237
235static void qla2x00_free_queues(struct qla_hw_data *ha) 238static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 239{
237 struct req_que *req; 240 struct req_que *req;
238 struct rsp_que *rsp; 241 struct rsp_que *rsp;
239 int cnt; 242 int cnt;
240 243
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 244 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 245 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 246 qla2x00_free_req_que(ha, req);
245 } 247 }
246 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL;
248
249 kfree(ha->req_q_map); 248 kfree(ha->req_q_map);
250 ha->req_q_map = NULL; 249 ha->req_q_map = NULL;
250
251 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
252 rsp = ha->rsp_q_map[cnt];
253 qla2x00_free_rsp_que(ha, rsp);
254 }
255 kfree(ha->rsp_q_map);
256 ha->rsp_q_map = NULL;
251} 257}
252 258
253static char * 259static char *
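Freeing is split to match the new layout: qla2x00_free_req_que() and qla2x00_free_rsp_que() release one queue each, and qla2x00_free_queues() runs two independent loops bounded by max_req_queues and max_rsp_queues, since the two maps can now differ in size. A stand-alone sketch of the split helpers follows, using malloc/free in place of dma_alloc_coherent/dma_free_coherent.

        /* Sketch: per-type free helpers plus the sweep that calls them.
         * Heap allocations stand in for the driver's coherent DMA rings. */
        #include <stdlib.h>

        #define MAX_REQ_QUEUES 4
        #define MAX_RSP_QUEUES 2

        struct req_que { void *ring; };
        struct rsp_que { void *ring; };

        static void free_req_que(struct req_que *req)
        {
                if (req && req->ring)
                        free(req->ring);
                free(req);
        }

        static void free_rsp_que(struct rsp_que *rsp)
        {
                if (rsp && rsp->ring)
                        free(rsp->ring);
                free(rsp);
        }

        static void free_queues(struct req_que *req_q_map[],
            struct rsp_que *rsp_q_map[])
        {
                int cnt;

                for (cnt = 0; cnt < MAX_REQ_QUEUES; cnt++)
                        free_req_que(req_q_map[cnt]);
                for (cnt = 0; cnt < MAX_RSP_QUEUES; cnt++)
                        free_rsp_que(rsp_q_map[cnt]);
        }

        int main(void)
        {
                struct req_que *reqs[MAX_REQ_QUEUES] = { 0 };
                struct rsp_que *rsps[MAX_RSP_QUEUES] = { 0 };

                reqs[0] = calloc(1, sizeof(*reqs[0]));
                reqs[0]->ring = malloc(64);
                rsps[0] = calloc(1, sizeof(*rsps[0]));
                free_queues(reqs, rsps);
                return 0;
        }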
@@ -612,7 +618,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 618void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 619qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 620{
615 int cnt, que, id; 621 int cnt;
616 unsigned long flags; 622 unsigned long flags;
617 srb_t *sp; 623 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 624 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +626,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 626 struct req_que *req;
621 627
622 spin_lock_irqsave(&ha->hardware_lock, flags); 628 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 629 req = vha->req;
624 id = vha->req_ques[que]; 630 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 631 sp = req->outstanding_cmds[cnt];
626 if (!req) 632 if (!sp)
633 continue;
634 if (sp->fcport != fcport)
627 continue; 635 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 636
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 637 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 638 if (ha->isp_ops->abort_command(sp)) {
639 DEBUG2(qla_printk(KERN_WARNING, ha,
640 "Abort failed -- %lx\n",
641 sp->cmd->serial_number));
642 } else {
643 if (qla2x00_eh_wait_on_command(sp->cmd) !=
644 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 645 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 646 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 647 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 648 }
649 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 650 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 651 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 652}
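qla2x00_abort_fcport_cmds() loses its per-vport queue loop and walks only vha->req, but it keeps the existing locking discipline: the hardware lock is dropped before issuing the potentially blocking abort and waiting for the command, then re-taken before the scan continues. A small pthread-based sketch of that drop-and-retake pattern is below; it is a generic illustration, not the driver's spinlock code.

        /* Sketch: scan a table under a lock, releasing the lock around a
         * blocking per-entry operation and re-acquiring it before continuing.
         * A pthread mutex stands in for the hardware spinlock. */
        #include <pthread.h>
        #include <stdio.h>

        #define NSLOTS 8

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static int table[NSLOTS] = { 0, 1, 0, 1, 0, 0, 1, 0 };

        static void blocking_abort(int slot)
        {
                /* Stand-in for abort_command() plus the eh wait, which may
                 * sleep and therefore must run unlocked. */
                printf("aborting slot %d\n", slot);
        }

        static void abort_matching(void)
        {
                int slot;

                pthread_mutex_lock(&lock);
                for (slot = 0; slot < NSLOTS; slot++) {
                        if (!table[slot])
                                continue;
                        pthread_mutex_unlock(&lock);    /* drop before blocking */
                        blocking_abort(slot);
                        pthread_mutex_lock(&lock);      /* retake before rescanning */
                        table[slot] = 0;
                }
                pthread_mutex_unlock(&lock);
        }

        int main(void)
        {
                abort_matching();
                return 0;
        }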
@@ -726,7 +727,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 727 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 728
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 729 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 730 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 731 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 732 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 733 ret = FAILED;
@@ -820,7 +821,7 @@ static char *reset_errors[] = {
820 821
821static int 822static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 823__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 824 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 825{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 826 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 827 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +842,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 842 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 843 goto eh_reset_failed;
843 err = 2; 844 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 845 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
846 != QLA_SUCCESS)
845 goto eh_reset_failed; 847 goto eh_reset_failed;
846 err = 3; 848 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 849 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -1065,7 +1067,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1065 if (fcport->port_type != FCT_TARGET) 1067 if (fcport->port_type != FCT_TARGET)
1066 continue; 1068 continue;
1067 1069
1068 ret = ha->isp_ops->target_reset(fcport, 0); 1070 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1069 if (ret != QLA_SUCCESS) { 1071 if (ret != QLA_SUCCESS) {
1070 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1072 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1071 "target_reset=%d d_id=%x.\n", __func__, 1073 "target_reset=%d d_id=%x.\n", __func__,
@@ -1089,7 +1091,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1089 struct req_que *req; 1091 struct req_que *req;
1090 1092
1091 spin_lock_irqsave(&ha->hardware_lock, flags); 1093 spin_lock_irqsave(&ha->hardware_lock, flags);
1092 for (que = 0; que < ha->max_queues; que++) { 1094 for (que = 0; que < ha->max_req_queues; que++) {
1093 req = ha->req_q_map[que]; 1095 req = ha->req_q_map[que];
1094 if (!req) 1096 if (!req)
1095 continue; 1097 continue;
@@ -1124,7 +1126,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1124 scsi_qla_host_t *vha = shost_priv(sdev->host); 1126 scsi_qla_host_t *vha = shost_priv(sdev->host);
1125 struct qla_hw_data *ha = vha->hw; 1127 struct qla_hw_data *ha = vha->hw;
1126 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1128 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1127 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1129 struct req_que *req = vha->req;
1128 1130
1129 if (sdev->tagged_supported) 1131 if (sdev->tagged_supported)
1130 scsi_activate_tcq(sdev, req->max_q_depth); 1132 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1572,8 +1574,9 @@ skip_pio:
1572 } 1574 }
1573 1575
1574 /* Determine queue resources */ 1576 /* Determine queue resources */
1575 ha->max_queues = 1; 1577 ha->max_req_queues = ha->max_rsp_queues = 1;
1576 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1578 if (ql2xmaxqueues <= 1 &&
1579 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1577 goto mqiobase_exit; 1580 goto mqiobase_exit;
1578 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1581 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1579 pci_resource_len(ha->pdev, 3)); 1582 pci_resource_len(ha->pdev, 3));
@@ -1581,20 +1584,17 @@ skip_pio:
1581 /* Read MSIX vector size of the board */ 1584 /* Read MSIX vector size of the board */
1582 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1585 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1583 ha->msix_count = msix; 1586 ha->msix_count = msix;
1584 /* Max queues are bounded by available msix vectors */ 1587 if (ql2xmaxqueues > 1) {
1585 /* queue 0 uses two msix vectors */ 1588 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1586 if (ha->msix_count - 1 < ql2xmaxqueues) 1589 QLA_MQ_SIZE : ql2xmaxqueues;
1587 ha->max_queues = ha->msix_count - 1; 1590 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1588 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1591 " of request queues:%d\n", ha->max_req_queues));
1589 ha->max_queues = QLA_MQ_SIZE; 1592 }
1590 else 1593 } else
1591 ha->max_queues = ql2xmaxqueues; 1594 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1592 qla_printk(KERN_INFO, ha,
1593 "MSI-X vector count: %d\n", msix);
1594 }
1595 1595
1596mqiobase_exit: 1596mqiobase_exit:
1597 ha->msix_count = ha->max_queues + 1; 1597 ha->msix_count = ha->max_rsp_queues + 1;
1598 return (0); 1598 return (0);
1599 1599
1600iospace_error_exit: 1600iospace_error_exit:
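Queue sizing at probe time is reworked: max_req_queues and max_rsp_queues both default to 1, and only when ql2xmaxqueues is greater than 1 (with the multiqueue BAR mapped) is max_req_queues raised to min(ql2xmaxqueues, QLA_MQ_SIZE); the MSI-X vector count is then derived as max_rsp_queues + 1. A small sketch of that clamping arithmetic follows, with an assumed QLA_MQ_SIZE value for illustration.

        /* Sketch: derive queue counts from the module parameter.  QLA_MQ_SIZE
         * is an assumed value here; the driver defines its own. */
        #include <stdio.h>

        #define QLA_MQ_SIZE 32

        struct limits {
                int max_req_queues;
                int max_rsp_queues;
                int msix_count;
        };

        static struct limits size_queues(int ql2xmaxqueues, int mq_capable)
        {
                struct limits l = { .max_req_queues = 1, .max_rsp_queues = 1 };

                if (mq_capable && ql2xmaxqueues > 1)
                        l.max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
                            QLA_MQ_SIZE : ql2xmaxqueues;
                /* one vector per response queue plus one for the base queue */
                l.msix_count = l.max_rsp_queues + 1;
                return l;
        }

        int main(void)
        {
                struct limits l = size_queues(64, 1);

                printf("req=%d rsp=%d msix=%d\n",
                    l.max_req_queues, l.max_rsp_queues, l.msix_count);
                return 0;
        }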
@@ -1804,14 +1804,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1804 1804
1805 ret = -ENOMEM; 1805 ret = -ENOMEM;
1806 qla2x00_mem_free(ha); 1806 qla2x00_mem_free(ha);
1807 qla2x00_free_que(ha, req, rsp); 1807 qla2x00_free_req_que(ha, req);
1808 qla2x00_free_rsp_que(ha, rsp);
1808 goto probe_hw_failed; 1809 goto probe_hw_failed;
1809 } 1810 }
1810 1811
1811 pci_set_drvdata(pdev, base_vha); 1812 pci_set_drvdata(pdev, base_vha);
1812 1813
1813 host = base_vha->host; 1814 host = base_vha->host;
1814 base_vha->req_ques[0] = req->id; 1815 base_vha->req = req;
1815 host->can_queue = req->length + 128; 1816 host->can_queue = req->length + 128;
1816 if (IS_QLA2XXX_MIDTYPE(ha)) 1817 if (IS_QLA2XXX_MIDTYPE(ha))
1817 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1818 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1842,7 +1843,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1842 } 1843 }
1843 ha->rsp_q_map[0] = rsp; 1844 ha->rsp_q_map[0] = rsp;
1844 ha->req_q_map[0] = req; 1845 ha->req_q_map[0] = req;
1845 1846 rsp->req = req;
1847 req->rsp = rsp;
1848 set_bit(0, ha->req_qid_map);
1849 set_bit(0, ha->rsp_qid_map);
1846 /* FWI2-capable only. */ 1850 /* FWI2-capable only. */
1847 req->req_q_in = &ha->iobase->isp24.req_q_in; 1851 req->req_q_in = &ha->iobase->isp24.req_q_in;
1848 req->req_q_out = &ha->iobase->isp24.req_q_out; 1852 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1918,8 +1922,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1918 return 0; 1922 return 0;
1919 1923
1920probe_init_failed: 1924probe_init_failed:
1921 qla2x00_free_que(ha, req, rsp); 1925 qla2x00_free_req_que(ha, req);
1922 ha->max_queues = 0; 1926 qla2x00_free_rsp_que(ha, rsp);
1927 ha->max_req_queues = ha->max_rsp_queues = 0;
1923 1928
1924probe_failed: 1929probe_failed:
1925 if (base_vha->timer_active) 1930 if (base_vha->timer_active)
@@ -2018,6 +2023,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2018{ 2023{
2019 struct qla_hw_data *ha = vha->hw; 2024 struct qla_hw_data *ha = vha->hw;
2020 2025
2026 qla25xx_delete_queues(vha);
2027
2021 if (ha->flags.fce_enabled) 2028 if (ha->flags.fce_enabled)
2022 qla2x00_disable_fce_trace(vha, NULL, NULL); 2029 qla2x00_disable_fce_trace(vha, NULL, NULL);
2023 2030
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 81187a0246cd..2a9b3f83ba67 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -920,6 +920,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
920 920
921 entry = data + sizeof(struct qla_npiv_header); 921 entry = data + sizeof(struct qla_npiv_header);
922 cnt = le16_to_cpu(hdr.entries); 922 cnt = le16_to_cpu(hdr.entries);
923 ha->flex_port_count = cnt;
923 for (i = 0; cnt; cnt--, entry++, i++) { 924 for (i = 0; cnt; cnt--, entry++, i++) {
924 uint16_t flags; 925 uint16_t flags;
925 struct fc_vport_identifiers vid; 926 struct fc_vport_identifiers vid;