Diffstat (limited to 'drivers/scsi/qla2xxx/qla_isr.c')
 drivers/scsi/qla2xxx/qla_isr.c | 335
 1 file changed, 240 insertions(+), 95 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 89d327117aa..eb4b43d7697 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,10 +10,12 @@
 #include <scsi/scsi_tcq.h>
 
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
-static void qla2x00_status_entry(scsi_qla_host_t *, void *);
+static void qla2x00_process_completed_request(struct scsi_qla_host *,
+    struct req_que *, uint32_t);
+static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
-static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
+static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+    sts_entry_t *);
 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
 
 /**
@@ -83,7 +85,7 @@ qla2100_intr_handler(int irq, void *dev_id)
             mb[1] = RD_MAILBOX_REG(ha, reg, 1);
             mb[2] = RD_MAILBOX_REG(ha, reg, 2);
             mb[3] = RD_MAILBOX_REG(ha, reg, 3);
-            qla2x00_async_event(vha, mb);
+            qla2x00_async_event(vha, rsp, mb);
         } else {
             /*EMPTY*/
             DEBUG2(printk("scsi(%ld): Unrecognized "
@@ -94,7 +96,7 @@ qla2100_intr_handler(int irq, void *dev_id)
             WRT_REG_WORD(&reg->semaphore, 0);
             RD_REG_WORD(&reg->semaphore);
         } else {
-            qla2x00_process_response_queue(vha);
+            qla2x00_process_response_queue(rsp);
 
             WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
             RD_REG_WORD(&reg->hccr);
@@ -190,21 +192,21 @@ qla2300_intr_handler(int irq, void *dev_id)
             mb[1] = RD_MAILBOX_REG(ha, reg, 1);
             mb[2] = RD_MAILBOX_REG(ha, reg, 2);
             mb[3] = RD_MAILBOX_REG(ha, reg, 3);
-            qla2x00_async_event(vha, mb);
+            qla2x00_async_event(vha, rsp, mb);
             break;
         case 0x13:
-            qla2x00_process_response_queue(vha);
+            qla2x00_process_response_queue(rsp);
             break;
         case 0x15:
             mb[0] = MBA_CMPLT_1_16BIT;
             mb[1] = MSW(stat);
-            qla2x00_async_event(vha, mb);
+            qla2x00_async_event(vha, rsp, mb);
             break;
         case 0x16:
             mb[0] = MBA_SCSI_COMPLETION;
             mb[1] = MSW(stat);
             mb[2] = RD_MAILBOX_REG(ha, reg, 2);
-            qla2x00_async_event(vha, mb);
+            qla2x00_async_event(vha, rsp, mb);
             break;
         default:
             DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -270,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  * @mb: Mailbox registers (0 - 3)
  */
 void
-qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb)
+qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 {
 #define LS_UNKNOWN 2
     static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -344,7 +346,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb)
             break;
 
         for (cnt = 0; cnt < handle_cnt; cnt++)
-            qla2x00_process_completed_request(vha, handles[cnt]);
+            qla2x00_process_completed_request(vha, rsp->req,
+                handles[cnt]);
         break;
 
     case MBA_RESET:            /* Reset */
@@ -554,6 +557,10 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb)
         break;
 
     case MBA_PORT_UPDATE:        /* Port database update */
+        /* Only handle SCNs for our Vport index. */
+        if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
+            break;
+
         /*
          * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
          * event etc. earlier indicating loop is down) then process
@@ -641,9 +648,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb)
             vha->host_no));
 
         if (IS_FWI2_CAPABLE(ha))
-            qla24xx_process_response_queue(vha);
+            qla24xx_process_response_queue(rsp);
         else
-            qla2x00_process_response_queue(vha);
+            qla2x00_process_response_queue(rsp);
         break;
 
     case MBA_DISCARD_RND_FRAME:
@@ -694,15 +701,21 @@ qla2x00_async_event(scsi_qla_host_t *vha, uint16_t *mb)
     }
 
     if (!vha->vp_idx && ha->num_vhosts)
-        qla2x00_alert_all_vps(ha, mb);
+        qla2x00_alert_all_vps(rsp, mb);
 }
 
 static void
 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
 {
     fc_port_t *fcport = data;
-    struct qla_hw_data *ha = fcport->vha->hw;
-    if (ha->req->max_q_depth <= sdev->queue_depth)
+    struct scsi_qla_host *vha = fcport->vha;
+    struct qla_hw_data *ha = vha->hw;
+    struct req_que *req = NULL;
+
+    req = ha->req_q_map[vha->req_ques[0]];
+    if (!req)
+        return;
+    if (req->max_q_depth <= sdev->queue_depth)
         return;
 
     if (sdev->ordered_tags)
@@ -735,14 +748,14 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
 }
 
 static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, srb_t *sp)
+qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
+    srb_t *sp)
 {
     fc_port_t *fcport;
     struct scsi_device *sdev;
-    struct qla_hw_data *ha = vha->hw;
 
     sdev = sp->cmd->device;
-    if (sdev->queue_depth >= ha->req->max_q_depth)
+    if (sdev->queue_depth >= req->max_q_depth)
         return;
 
     fcport = sp->fcport;
@@ -763,11 +776,11 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, srb_t *sp)
  * @index: SRB index
  */
 static void
-qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index)
+qla2x00_process_completed_request(struct scsi_qla_host *vha,
+    struct req_que *req, uint32_t index)
 {
     srb_t *sp;
     struct qla_hw_data *ha = vha->hw;
-    struct req_que *req = ha->req;
 
     /* Validate handle. */
     if (index >= MAX_OUTSTANDING_COMMANDS) {
@@ -791,8 +804,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index)
         /* Save ISP completion status */
         sp->cmd->result = DID_OK << 16;
 
-        qla2x00_ramp_up_queue_depth(vha, sp);
-        qla2x00_sp_compl(vha, sp);
+        qla2x00_ramp_up_queue_depth(vha, req, sp);
+        qla2x00_sp_compl(ha, sp);
     } else {
         DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
             vha->host_no));
@@ -808,14 +821,16 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, uint32_t index)
  * @ha: SCSI driver HA context
  */
 void
-qla2x00_process_response_queue(struct scsi_qla_host *vha)
+qla2x00_process_response_queue(struct rsp_que *rsp)
 {
-    struct qla_hw_data *ha = vha->hw;
+    struct scsi_qla_host *vha;
+    struct qla_hw_data *ha = rsp->hw;
     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
     sts_entry_t *pkt;
     uint16_t handle_cnt;
     uint16_t cnt;
-    struct rsp_que *rsp = ha->rsp;
+
+    vha = qla2x00_get_rsp_host(rsp);
 
     if (!vha->flags.online)
         return;
@@ -835,7 +850,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *vha)
             DEBUG3(printk(KERN_INFO
                 "scsi(%ld): Process error entry.\n", vha->host_no));
 
-            qla2x00_error_entry(vha, pkt);
+            qla2x00_error_entry(vha, rsp, pkt);
             ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
             wmb();
             continue;
@@ -843,19 +858,19 @@ qla2x00_process_response_queue(struct scsi_qla_host *vha)
 
         switch (pkt->entry_type) {
         case STATUS_TYPE:
-            qla2x00_status_entry(vha, pkt);
+            qla2x00_status_entry(vha, rsp, pkt);
             break;
         case STATUS_TYPE_21:
             handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
             for (cnt = 0; cnt < handle_cnt; cnt++) {
-                qla2x00_process_completed_request(vha,
+                qla2x00_process_completed_request(vha, rsp->req,
                     ((sts21_entry_t *)pkt)->handle[cnt]);
             }
             break;
         case STATUS_TYPE_22:
             handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
             for (cnt = 0; cnt < handle_cnt; cnt++) {
-                qla2x00_process_completed_request(vha,
+                qla2x00_process_completed_request(vha, rsp->req,
                     ((sts22_entry_t *)pkt)->handle[cnt]);
             }
             break;
@@ -914,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
  * @pkt: Entry pointer
  */
 static void
-qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt)
+qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 {
     srb_t *sp;
     fc_port_t *fcport;
@@ -928,7 +943,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt)
     uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
     uint8_t *rsp_info, *sense_data;
     struct qla_hw_data *ha = vha->hw;
-    struct req_que *req = ha->req;
+    struct req_que *req = rsp->req;
 
     sts = (sts_entry_t *) pkt;
     sts24 = (struct sts_entry_24xx *) pkt;
@@ -942,7 +957,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt)
 
     /* Fast path completion. */
     if (comp_status == CS_COMPLETE && scsi_status == 0) {
-        qla2x00_process_completed_request(vha, sts->handle);
+        qla2x00_process_completed_request(vha, req, sts->handle);
 
         return;
     }
@@ -1012,7 +1027,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt)
                 rsp_info[5], rsp_info[6], rsp_info[7]));
 
             cp->result = DID_BUS_BUSY << 16;
-            qla2x00_sp_compl(vha, sp);
+            qla2x00_sp_compl(ha, sp);
             return;
         }
     }
@@ -1276,7 +1291,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, void *pkt)
 
     /* Place command on done queue. */
     if (vha->status_srb == NULL)
-        qla2x00_sp_compl(vha, sp);
+        qla2x00_sp_compl(ha, sp);
 }
 
 /**
@@ -1325,7 +1340,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
         /* Place command on done queue. */
         if (sp->request_sense_length == 0) {
             vha->status_srb = NULL;
-            qla2x00_sp_compl(vha, sp);
+            qla2x00_sp_compl(ha, sp);
         }
     }
 }
@@ -1336,11 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
  * @pkt: Entry pointer
  */
 static void
-qla2x00_error_entry(scsi_qla_host_t *vha, sts_entry_t *pkt)
+qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
     srb_t *sp;
     struct qla_hw_data *ha = vha->hw;
-    struct req_que *req = ha->req;
+    struct req_que *req = rsp->req;
 #if defined(QL_DEBUG_LEVEL_2)
     if (pkt->entry_status & RF_INV_E_ORDER)
         qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1377,7 +1392,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, sts_entry_t *pkt)
         } else {
             sp->cmd->result = DID_ERROR << 16;
         }
-        qla2x00_sp_compl(vha, sp);
+        qla2x00_sp_compl(ha, sp);
 
     } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
         COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
@@ -1428,12 +1443,14 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  * @ha: SCSI driver HA context
  */
 void
-qla24xx_process_response_queue(struct scsi_qla_host *vha)
+qla24xx_process_response_queue(struct rsp_que *rsp)
 {
-    struct qla_hw_data *ha = vha->hw;
-    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+    struct qla_hw_data *ha = rsp->hw;
+    device_reg_t __iomem *reg = ISP_QUE_REG(ha, rsp->id);
     struct sts_entry_24xx *pkt;
-    struct rsp_que *rsp = ha->rsp;
+    struct scsi_qla_host *vha;
+
+    vha = qla2x00_get_rsp_host(rsp);
 
     if (!vha->flags.online)
         return;
@@ -1453,7 +1470,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *vha)
             DEBUG3(printk(KERN_INFO
                 "scsi(%ld): Process error entry.\n", vha->host_no));
 
-            qla2x00_error_entry(vha, (sts_entry_t *) pkt);
+            qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
             ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
             wmb();
             continue;
@@ -1461,7 +1478,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *vha)
 
         switch (pkt->entry_type) {
         case STATUS_TYPE:
-            qla2x00_status_entry(vha, pkt);
+            qla2x00_status_entry(vha, rsp, pkt);
             break;
         case STATUS_CONT_TYPE:
             qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
@@ -1483,7 +1500,10 @@ qla24xx_process_response_queue(struct scsi_qla_host *vha)
     }
 
     /* Adjust ring index */
-    WRT_REG_DWORD(&reg->rsp_q_out, rsp->ring_index);
+    if (ha->mqenable)
+        WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, rsp->ring_index);
+    else
+        WRT_REG_DWORD(&reg->isp24.rsp_q_out, rsp->ring_index);
 }
 
 static void
@@ -1607,10 +1627,11 @@ qla24xx_intr_handler(int irq, void *dev_id)
             mb[1] = RD_REG_WORD(&reg->mailbox1);
             mb[2] = RD_REG_WORD(&reg->mailbox2);
             mb[3] = RD_REG_WORD(&reg->mailbox3);
-            qla2x00_async_event(vha, mb);
+            qla2x00_async_event(vha, rsp, mb);
             break;
         case 0x13:
-            qla24xx_process_response_queue(vha);
+        case 0x14:
+            qla24xx_process_response_queue(rsp);
             break;
         default:
             DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1635,7 +1656,6 @@ qla24xx_intr_handler(int irq, void *dev_id)
 static irqreturn_t
 qla24xx_msix_rsp_q(int irq, void *dev_id)
 {
-    scsi_qla_host_t *vha;
     struct qla_hw_data *ha;
     struct rsp_que *rsp;
     struct device_reg_24xx __iomem *reg;
@@ -1651,8 +1671,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
     spin_lock_irq(&ha->hardware_lock);
 
-    vha = qla2x00_get_rsp_host(rsp);
-    qla24xx_process_response_queue(vha);
+    qla24xx_process_response_queue(rsp);
     WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 
     spin_unlock_irq(&ha->hardware_lock);
@@ -1661,6 +1680,41 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 }
 
 static irqreturn_t
+qla25xx_msix_rsp_q(int irq, void *dev_id)
+{
+    struct qla_hw_data *ha;
+    struct rsp_que *rsp;
+    struct device_reg_24xx __iomem *reg;
+    uint16_t msix_disabled_hccr = 0;
+
+    rsp = (struct rsp_que *) dev_id;
+    if (!rsp) {
+        printk(KERN_INFO
+            "%s(): NULL response queue pointer\n", __func__);
+        return IRQ_NONE;
+    }
+    ha = rsp->hw;
+    reg = &ha->iobase->isp24;
+
+    spin_lock_irq(&ha->hardware_lock);
+
+    msix_disabled_hccr = rsp->options;
+    if (!rsp->id)
+        msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
+    else
+        msix_disabled_hccr &= BIT_6;
+
+    qla24xx_process_response_queue(rsp);
+
+    if (!msix_disabled_hccr)
+        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+
+    spin_unlock_irq(&ha->hardware_lock);
+
+    return IRQ_HANDLED;
+}
+
+static irqreturn_t
 qla24xx_msix_default(int irq, void *dev_id)
 {
     scsi_qla_host_t *vha;
@@ -1723,10 +1777,11 @@ qla24xx_msix_default(int irq, void *dev_id)
             mb[1] = RD_REG_WORD(&reg->mailbox1);
             mb[2] = RD_REG_WORD(&reg->mailbox2);
             mb[3] = RD_REG_WORD(&reg->mailbox3);
-            qla2x00_async_event(vha, mb);
+            qla2x00_async_event(vha, rsp, mb);
             break;
         case 0x13:
-            qla24xx_process_response_queue(vha);
+        case 0x14:
+            qla24xx_process_response_queue(rsp);
             break;
         default:
             DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1756,12 +1811,25 @@ struct qla_init_msix_entry {
     irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
-    { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
-        "qla2xxx (default)", qla24xx_msix_default },
+static struct qla_init_msix_entry base_queue = {
+    .entry = 0,
+    .index = 0,
+    .name = "qla2xxx (default)",
+    .handler = qla24xx_msix_default,
+};
+
+static struct qla_init_msix_entry base_rsp_queue = {
+    .entry = 1,
+    .index = 1,
+    .name = "qla2xxx (rsp_q)",
+    .handler = qla24xx_msix_rsp_q,
+};
 
-    { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
-        "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+static struct qla_init_msix_entry multi_rsp_queue = {
+    .entry = 1,
+    .index = 1,
+    .name = "qla2xxx (multi_q)",
+    .handler = qla25xx_msix_rsp_q,
 };
 
 static void
@@ -1769,63 +1837,115 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
 {
     int i;
     struct qla_msix_entry *qentry;
-    struct rsp_que *rsp = ha->rsp;
 
-    for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
-        qentry = &ha->msix_entries[imsix_entries[i].index];
+    for (i = 0; i < ha->msix_count; i++) {
+        qentry = &ha->msix_entries[i];
         if (qentry->have_irq)
-            free_irq(qentry->msix_vector, rsp);
+            free_irq(qentry->vector, qentry->rsp);
     }
     pci_disable_msix(ha->pdev);
+    kfree(ha->msix_entries);
+    ha->msix_entries = NULL;
+    ha->flags.msix_enabled = 0;
 }
 
 static int
-qla24xx_enable_msix(struct qla_hw_data *ha)
+qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
     int i, ret;
-    struct rsp_que *rsp = ha->rsp;
-    struct msix_entry entries[QLA_MSIX_ENTRIES];
+    struct msix_entry *entries;
     struct qla_msix_entry *qentry;
+    struct qla_init_msix_entry *msix_queue;
+
+    entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
+        GFP_KERNEL);
+    if (!entries)
+        return -ENOMEM;
 
-    for (i = 0; i < QLA_MSIX_ENTRIES; i++)
-        entries[i].entry = imsix_entries[i].entry;
+    for (i = 0; i < ha->msix_count; i++)
+        entries[i].entry = i;
 
-    ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
+    ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
     if (ret) {
         qla_printk(KERN_WARNING, ha,
-            "MSI-X: Failed to enable support -- %d/%d\n",
-            QLA_MSIX_ENTRIES, ret);
+            "MSI-X: Failed to enable support -- %d/%d\n"
+            " Retry with %d vectors\n", ha->msix_count, ret, ret);
+        ha->msix_count = ret;
+        ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
+        if (ret) {
+            qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
+                " support, giving up -- %d/%d\n",
+                ha->msix_count, ret);
+            goto msix_out;
+        }
+        ha->max_queues = ha->msix_count - 1;
+    }
+    ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+        ha->msix_count, GFP_KERNEL);
+    if (!ha->msix_entries) {
+        ret = -ENOMEM;
         goto msix_out;
     }
     ha->flags.msix_enabled = 1;
 
-    for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
-        qentry = &ha->msix_entries[imsix_entries[i].index];
-        qentry->msix_vector = entries[i].vector;
-        qentry->msix_entry = entries[i].entry;
+    for (i = 0; i < ha->msix_count; i++) {
+        qentry = &ha->msix_entries[i];
+        qentry->vector = entries[i].vector;
+        qentry->entry = entries[i].entry;
         qentry->have_irq = 0;
-        ret = request_irq(qentry->msix_vector,
-            imsix_entries[i].handler, 0, imsix_entries[i].name, rsp);
-        if (ret) {
-            qla_printk(KERN_WARNING, ha,
-                "MSI-X: Unable to register handler -- %x/%d.\n",
-                imsix_entries[i].index, ret);
-            qla24xx_disable_msix(ha);
-            goto msix_out;
-        }
-        qentry->have_irq = 1;
+        qentry->rsp = NULL;
     }
 
+    /* Enable MSI-X for AENs for queue 0 */
+    qentry = &ha->msix_entries[0];
+    ret = request_irq(qentry->vector, base_queue.handler, 0,
+        base_queue.name, rsp);
+    if (ret) {
+        qla_printk(KERN_WARNING, ha,
+            "MSI-X: Unable to register handler -- %x/%d.\n",
+            qentry->vector, ret);
+        qla24xx_disable_msix(ha);
+        goto msix_out;
+    }
+    qentry->have_irq = 1;
+    qentry->rsp = rsp;
+
+    /* Enable MSI-X vector for response queue update for queue 0 */
+    if (ha->max_queues > 1 && ha->mqiobase) {
+        ha->mqenable = 1;
+        msix_queue = &multi_rsp_queue;
+        qla_printk(KERN_INFO, ha,
+            "MQ enabled, Number of Queue Resources: %d \n",
+            ha->max_queues);
+    } else {
+        ha->mqenable = 0;
+        msix_queue = &base_rsp_queue;
+    }
+
+    qentry = &ha->msix_entries[1];
+    ret = request_irq(qentry->vector, msix_queue->handler, 0,
+        msix_queue->name, rsp);
+    if (ret) {
+        qla_printk(KERN_WARNING, ha,
+            "MSI-X: Unable to register handler -- %x/%d.\n",
+            qentry->vector, ret);
+        qla24xx_disable_msix(ha);
+        ha->mqenable = 0;
+        goto msix_out;
+    }
+    qentry->have_irq = 1;
+    qentry->rsp = rsp;
+
 msix_out:
+    kfree(entries);
     return ret;
 }
 
 int
-qla2x00_request_irqs(struct qla_hw_data *ha)
+qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
     int ret;
     device_reg_t __iomem *reg = ha->iobase;
-    struct rsp_que *rsp = ha->rsp;
 
     /* If possible, enable MSI-X. */
     if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
@@ -1852,7 +1972,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha)
         goto skip_msi;
     }
 
-    ret = qla24xx_enable_msix(ha);
+    ret = qla24xx_enable_msix(ha, rsp);
     if (!ret) {
         DEBUG2(qla_printk(KERN_INFO, ha,
             "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
@@ -1903,7 +2023,7 @@ void
 qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
     struct qla_hw_data *ha = vha->hw;
-    struct rsp_que *rsp = ha->rsp;
+    struct rsp_que *rsp = ha->rsp_q_map[0];
 
     if (ha->flags.msix_enabled)
         qla24xx_disable_msix(ha);
@@ -1919,16 +2039,41 @@ qla2x00_get_rsp_host(struct rsp_que *rsp)
     srb_t *sp;
     struct qla_hw_data *ha = rsp->hw;
     struct scsi_qla_host *vha = NULL;
-    struct sts_entry_24xx *pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
-
-    if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
-        sp = ha->req->outstanding_cmds[pkt->handle];
-        if (sp)
-            vha = sp->vha;
+    struct sts_entry_24xx *pkt;
+    struct req_que *req;
+
+    if (rsp->id) {
+        pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+        req = rsp->req;
+        if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
+            sp = req->outstanding_cmds[pkt->handle];
+            if (sp)
+                vha = sp->vha;
+        }
     }
     if (!vha)
-        /* Invalid entry, handle it in base queue */
+        /* handle it in base queue */
         vha = pci_get_drvdata(ha->pdev);
 
     return vha;
 }
+
+int qla25xx_request_irq(struct rsp_que *rsp)
+{
+    struct qla_hw_data *ha = rsp->hw;
+    struct qla_init_msix_entry *intr = &multi_rsp_queue;
+    struct qla_msix_entry *msix = rsp->msix;
+    int ret;
+
+    ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
+    if (ret) {
+        qla_printk(KERN_WARNING, ha,
+            "MSI-X: Unable to register handler -- %x/%d.\n",
+            msix->vector, ret);
+        return ret;
+    }
+    msix->have_irq = 1;
+    msix->rsp = rsp;
+    return ret;
+}
+