aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla2xxx/qla_os.c
diff options
context:
space:
mode:
authorAnirban Chakraborty <anirban.chakraborty@qlogic.com>2008-12-09 19:45:39 -0500
committerJames Bottomley <James.Bottomley@HansenPartnership.com>2008-12-29 12:24:33 -0500
commit73208dfd7ab19f379d73e8a0fbf30f92c203e5e8 (patch)
treef69be5e89817d17b066ece4dbe04e395339c0754 /drivers/scsi/qla2xxx/qla_os.c
parent85b4aa4926a50210b683ac89326e338e7d131211 (diff)
[SCSI] qla2xxx: add support for multi-queue adapter
The following changes have been made. 1. The qla_hw_data structure holds an array of request queue pointers, and an array of response queue pointers. 2. The base request and response queues are created by default. 3. Additional request and response queues are created at the time of vport creation. If queue resources are exhausted during vport creation, newly created vports use the default queue. 4. Requests are sent to the request queue that the vport was assigned in the beginning. 5. Responses are completed on the response queue with which the request queue is associated. [fixup memcpy argument reversal spotted by davej@redhat.com] Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com> Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c423
1 files changed, 275 insertions, 148 deletions
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f6365884c97b..9142025db3d8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings " 92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94 94
95 95int ql2xmaxqueues = 1;
96module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
97MODULE_PARM_DESC(ql2xmaxqueues,
98 "Enables MQ settings "
99 "Default is 1 for single queue. Set it to number \
100 of queues in MQ mode.");
96/* 101/*
97 * SCSI host template entry points 102 * SCSI host template entry points
98 */ 103 */
@@ -210,11 +215,77 @@ static int qla2x00_do_dpc(void *data);
210 215
211static void qla2x00_rst_aen(scsi_qla_host_t *); 216static void qla2x00_rst_aen(scsi_qla_host_t *);
212 217
213static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t); 218static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
219 struct req_que **, struct rsp_que **);
214static void qla2x00_mem_free(struct qla_hw_data *); 220static void qla2x00_mem_free(struct qla_hw_data *);
215static void qla2x00_sp_free_dma(srb_t *); 221static void qla2x00_sp_free_dma(srb_t *);
216 222
217/* -------------------------------------------------------------------------- */ 223/* -------------------------------------------------------------------------- */
224static int qla2x00_alloc_queues(struct qla_hw_data *ha)
225{
226 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
227 GFP_KERNEL);
228 if (!ha->req_q_map) {
229 qla_printk(KERN_WARNING, ha,
230 "Unable to allocate memory for request queue ptrs\n");
231 goto fail_req_map;
232 }
233
234 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
235 GFP_KERNEL);
236 if (!ha->rsp_q_map) {
237 qla_printk(KERN_WARNING, ha,
238 "Unable to allocate memory for response queue ptrs\n");
239 goto fail_rsp_map;
240 }
241 set_bit(0, ha->rsp_qid_map);
242 set_bit(0, ha->req_qid_map);
243 return 1;
244
245fail_rsp_map:
246 kfree(ha->req_q_map);
247 ha->req_q_map = NULL;
248fail_req_map:
249 return -ENOMEM;
250}
251
252static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
253 struct rsp_que *rsp)
254{
255 if (rsp && rsp->ring)
256 dma_free_coherent(&ha->pdev->dev,
257 (rsp->length + 1) * sizeof(response_t),
258 rsp->ring, rsp->dma);
259
260 kfree(rsp);
261 rsp = NULL;
262 if (req && req->ring)
263 dma_free_coherent(&ha->pdev->dev,
264 (req->length + 1) * sizeof(request_t),
265 req->ring, req->dma);
266
267 kfree(req);
268 req = NULL;
269}
270
271static void qla2x00_free_queues(struct qla_hw_data *ha)
272{
273 struct req_que *req;
274 struct rsp_que *rsp;
275 int cnt;
276
277 for (cnt = 0; cnt < ha->max_queues; cnt++) {
278 rsp = ha->rsp_q_map[cnt];
279 req = ha->req_q_map[cnt];
280 qla2x00_free_que(ha, req, rsp);
281 }
282 kfree(ha->rsp_q_map);
283 ha->rsp_q_map = NULL;
284
285 kfree(ha->req_q_map);
286 ha->req_q_map = NULL;
287}
288
218static char * 289static char *
219qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) 290qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
220{ 291{
@@ -629,34 +700,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
629void 700void
630qla2x00_abort_fcport_cmds(fc_port_t *fcport) 701qla2x00_abort_fcport_cmds(fc_port_t *fcport)
631{ 702{
632 int cnt; 703 int cnt, que, id;
633 unsigned long flags; 704 unsigned long flags;
634 srb_t *sp; 705 srb_t *sp;
635 scsi_qla_host_t *vha = fcport->vha; 706 scsi_qla_host_t *vha = fcport->vha;
636 struct qla_hw_data *ha = vha->hw; 707 struct qla_hw_data *ha = vha->hw;
637 struct req_que *req = ha->req; 708 struct req_que *req;
638 709
639 spin_lock_irqsave(&ha->hardware_lock, flags); 710 spin_lock_irqsave(&ha->hardware_lock, flags);
640 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 711 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
641 sp = req->outstanding_cmds[cnt]; 712 id = vha->req_ques[que];
642 if (!sp) 713 req = ha->req_q_map[id];
643 continue; 714 if (!req)
644 if (sp->fcport != fcport)
645 continue; 715 continue;
716 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
717 sp = req->outstanding_cmds[cnt];
718 if (!sp)
719 continue;
720 if (sp->fcport != fcport)
721 continue;
646 722
647 spin_unlock_irqrestore(&ha->hardware_lock, flags); 723 spin_unlock_irqrestore(&ha->hardware_lock, flags);
648 if (ha->isp_ops->abort_command(vha, sp)) { 724 if (ha->isp_ops->abort_command(vha, sp, req)) {
649 DEBUG2(qla_printk(KERN_WARNING, ha,
650 "Abort failed -- %lx\n", sp->cmd->serial_number));
651 } else {
652 if (qla2x00_eh_wait_on_command(sp->cmd) !=
653 QLA_SUCCESS)
654 DEBUG2(qla_printk(KERN_WARNING, ha, 725 DEBUG2(qla_printk(KERN_WARNING, ha,
655 "Abort failed while waiting -- %lx\n", 726 "Abort failed -- %lx\n",
656 sp->cmd->serial_number)); 727 sp->cmd->serial_number));
657 728 } else {
729 if (qla2x00_eh_wait_on_command(sp->cmd) !=
730 QLA_SUCCESS)
731 DEBUG2(qla_printk(KERN_WARNING, ha,
732 "Abort failed while waiting -- %lx\n",
733 sp->cmd->serial_number));
734 }
735 spin_lock_irqsave(&ha->hardware_lock, flags);
658 } 736 }
659 spin_lock_irqsave(&ha->hardware_lock, flags);
660 } 737 }
661 spin_unlock_irqrestore(&ha->hardware_lock, flags); 738 spin_unlock_irqrestore(&ha->hardware_lock, flags);
662} 739}
@@ -698,13 +775,13 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
698{ 775{
699 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 776 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
700 srb_t *sp; 777 srb_t *sp;
701 int ret, i; 778 int ret, i, que;
702 unsigned int id, lun; 779 unsigned int id, lun;
703 unsigned long serial; 780 unsigned long serial;
704 unsigned long flags; 781 unsigned long flags;
705 int wait = 0; 782 int wait = 0;
706 struct qla_hw_data *ha = vha->hw; 783 struct qla_hw_data *ha = vha->hw;
707 struct req_que *req = ha->req; 784 struct req_que *req;
708 785
709 qla2x00_block_error_handler(cmd); 786 qla2x00_block_error_handler(cmd);
710 787
@@ -719,31 +796,34 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
719 796
720 /* Check active list for command command. */ 797 /* Check active list for command command. */
721 spin_lock_irqsave(&ha->hardware_lock, flags); 798 spin_lock_irqsave(&ha->hardware_lock, flags);
722 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 799 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
723 sp = req->outstanding_cmds[i]; 800 req = ha->req_q_map[vha->req_ques[que]];
724 801 if (!req)
725 if (sp == NULL)
726 continue; 802 continue;
803 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
804 sp = req->outstanding_cmds[i];
727 805
728 if (sp->cmd != cmd) 806 if (sp == NULL)
729 continue; 807 continue;
730 808
731 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 809 if (sp->cmd != cmd)
732 __func__, vha->host_no, sp, serial)); 810 continue;
733 811
734 spin_unlock_irqrestore(&ha->hardware_lock, flags); 812 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
735 if (ha->isp_ops->abort_command(vha, sp)) { 813 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
736 DEBUG2(printk("%s(%ld): abort_command "
737 "mbx failed.\n", __func__, vha->host_no));
738 ret = FAILED;
739 } else {
740 DEBUG3(printk("%s(%ld): abort_command "
741 "mbx success.\n", __func__, vha->host_no));
742 wait = 1;
743 }
744 spin_lock_irqsave(&ha->hardware_lock, flags);
745 814
746 break; 815 spin_unlock_irqrestore(&ha->hardware_lock, flags);
816 if (ha->isp_ops->abort_command(vha, sp, req)) {
817 DEBUG2(printk("%s(%ld): abort_command "
818 "mbx failed.\n", __func__, vha->host_no));
819 } else {
820 DEBUG3(printk("%s(%ld): abort_command "
821 "mbx success.\n", __func__, vha->host_no));
822 wait = 1;
823 }
824 spin_lock_irqsave(&ha->hardware_lock, flags);
825 break;
826 }
747 } 827 }
748 spin_unlock_irqrestore(&ha->hardware_lock, flags); 828 spin_unlock_irqrestore(&ha->hardware_lock, flags);
749 829
@@ -774,41 +854,46 @@ static int
774qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, 854qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
775 unsigned int l, enum nexus_wait_type type) 855 unsigned int l, enum nexus_wait_type type)
776{ 856{
777 int cnt, match, status; 857 int cnt, match, status, que;
778 srb_t *sp; 858 srb_t *sp;
779 unsigned long flags; 859 unsigned long flags;
780 struct qla_hw_data *ha = vha->hw; 860 struct qla_hw_data *ha = vha->hw;
781 struct req_que *req = ha->req; 861 struct req_que *req;
782 862
783 status = QLA_SUCCESS; 863 status = QLA_SUCCESS;
784 spin_lock_irqsave(&ha->hardware_lock, flags); 864 spin_lock_irqsave(&ha->hardware_lock, flags);
785 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 865 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
786 cnt++) { 866 req = ha->req_q_map[vha->req_ques[que]];
787 sp = req->outstanding_cmds[cnt]; 867 if (!req)
788 if (!sp)
789 continue; 868 continue;
869 for (cnt = 1; status == QLA_SUCCESS &&
870 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
871 sp = req->outstanding_cmds[cnt];
872 if (!sp)
873 continue;
790 874
791 if (vha->vp_idx != sp->fcport->vha->vp_idx) 875 if (vha->vp_idx != sp->fcport->vha->vp_idx)
792 continue; 876 continue;
793 match = 0; 877 match = 0;
794 switch (type) { 878 switch (type) {
795 case WAIT_HOST: 879 case WAIT_HOST:
796 match = 1; 880 match = 1;
797 break; 881 break;
798 case WAIT_TARGET: 882 case WAIT_TARGET:
799 match = sp->cmd->device->id == t; 883 match = sp->cmd->device->id == t;
800 break; 884 break;
801 case WAIT_LUN: 885 case WAIT_LUN:
802 match = (sp->cmd->device->id == t && 886 match = (sp->cmd->device->id == t &&
803 sp->cmd->device->lun == l); 887 sp->cmd->device->lun == l);
804 break; 888 break;
805 } 889 }
806 if (!match) 890 if (!match)
807 continue; 891 continue;
808 892
809 spin_unlock_irqrestore(&ha->hardware_lock, flags); 893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
810 status = qla2x00_eh_wait_on_command(sp->cmd); 894 status = qla2x00_eh_wait_on_command(sp->cmd);
811 spin_lock_irqsave(&ha->hardware_lock, flags); 895 spin_lock_irqsave(&ha->hardware_lock, flags);
896 }
812 } 897 }
813 spin_unlock_irqrestore(&ha->hardware_lock, flags); 898 spin_unlock_irqrestore(&ha->hardware_lock, flags);
814 899
@@ -1074,7 +1159,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1074 } 1159 }
1075 } 1160 }
1076 } 1161 }
1077
1078 /* Issue marker command only when we are going to start the I/O */ 1162 /* Issue marker command only when we are going to start the I/O */
1079 vha->marker_needed = 1; 1163 vha->marker_needed = 1;
1080 1164
@@ -1084,19 +1168,24 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1084void 1168void
1085qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1169qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1086{ 1170{
1087 int cnt; 1171 int que, cnt;
1088 unsigned long flags; 1172 unsigned long flags;
1089 srb_t *sp; 1173 srb_t *sp;
1090 struct qla_hw_data *ha = vha->hw; 1174 struct qla_hw_data *ha = vha->hw;
1091 struct req_que *req = ha->req; 1175 struct req_que *req;
1092 1176
1093 spin_lock_irqsave(&ha->hardware_lock, flags); 1177 spin_lock_irqsave(&ha->hardware_lock, flags);
1094 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1178 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
1095 sp = req->outstanding_cmds[cnt]; 1179 req = ha->req_q_map[vha->req_ques[que]];
1096 if (sp) { 1180 if (!req)
1097 req->outstanding_cmds[cnt] = NULL; 1181 continue;
1098 sp->cmd->result = res; 1182 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1099 qla2x00_sp_compl(vha, sp); 1183 sp = req->outstanding_cmds[cnt];
1184 if (sp && sp->vha == vha) {
1185 req->outstanding_cmds[cnt] = NULL;
1186 sp->cmd->result = res;
1187 qla2x00_sp_compl(ha, sp);
1188 }
1100 } 1189 }
1101 } 1190 }
1102 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1191 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1121,11 +1210,12 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1121 scsi_qla_host_t *vha = shost_priv(sdev->host); 1210 scsi_qla_host_t *vha = shost_priv(sdev->host);
1122 struct qla_hw_data *ha = vha->hw; 1211 struct qla_hw_data *ha = vha->hw;
1123 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1212 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1213 struct req_que *req = ha->req_q_map[0];
1124 1214
1125 if (sdev->tagged_supported) 1215 if (sdev->tagged_supported)
1126 scsi_activate_tcq(sdev, ha->req->max_q_depth); 1216 scsi_activate_tcq(sdev, req->max_q_depth);
1127 else 1217 else
1128 scsi_deactivate_tcq(sdev, ha->req->max_q_depth); 1218 scsi_deactivate_tcq(sdev, req->max_q_depth);
1129 1219
1130 rport->dev_loss_tmo = ha->port_down_retry_count; 1220 rport->dev_loss_tmo = ha->port_down_retry_count;
1131 1221
@@ -1471,6 +1561,7 @@ static int
1471qla2x00_iospace_config(struct qla_hw_data *ha) 1561qla2x00_iospace_config(struct qla_hw_data *ha)
1472{ 1562{
1473 resource_size_t pio; 1563 resource_size_t pio;
1564 uint16_t msix;
1474 1565
1475 if (pci_request_selected_regions(ha->pdev, ha->bars, 1566 if (pci_request_selected_regions(ha->pdev, ha->bars,
1476 QLA2XXX_DRIVER_NAME)) { 1567 QLA2XXX_DRIVER_NAME)) {
@@ -1523,6 +1614,29 @@ skip_pio:
1523 goto iospace_error_exit; 1614 goto iospace_error_exit;
1524 } 1615 }
1525 1616
1617 /* Determine queue resources */
1618 ha->max_queues = 1;
1619 if (ql2xmaxqueues > 1) {
1620 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1621 pci_resource_len(ha->pdev, 3));
1622 if (ha->mqiobase) {
1623 /* Read MSIX vector size of the board */
1624 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL,
1625 &msix);
1626 ha->msix_count = msix;
1627 /* Max queues are bounded by available msix vectors */
1628 /* queue 0 uses two msix vectors */
1629 if (ha->msix_count - 1 < ql2xmaxqueues)
1630 ha->max_queues = ha->msix_count - 1;
1631 else if (ql2xmaxqueues > QLA_MQ_SIZE)
1632 ha->max_queues = QLA_MQ_SIZE;
1633 else
1634 ha->max_queues = ql2xmaxqueues;
1635 qla_printk(KERN_INFO, ha,
1636 "MSI-X vector count: %d\n", msix);
1637 }
1638 }
1639 ha->msix_count = ha->max_queues + 1;
1526 return (0); 1640 return (0);
1527 1641
1528iospace_error_exit: 1642iospace_error_exit:
@@ -1568,6 +1682,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1568 struct scsi_host_template *sht; 1682 struct scsi_host_template *sht;
1569 int bars, max_id, mem_only = 0; 1683 int bars, max_id, mem_only = 0;
1570 uint16_t req_length = 0, rsp_length = 0; 1684 uint16_t req_length = 0, rsp_length = 0;
1685 struct req_que *req = NULL;
1686 struct rsp_que *rsp = NULL;
1571 1687
1572 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1688 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1573 sht = &qla2x00_driver_template; 1689 sht = &qla2x00_driver_template;
@@ -1655,6 +1771,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1655 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1771 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1656 ha->gid_list_info_size = 8; 1772 ha->gid_list_info_size = 8;
1657 ha->optrom_size = OPTROM_SIZE_24XX; 1773 ha->optrom_size = OPTROM_SIZE_24XX;
1774 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1658 ha->isp_ops = &qla24xx_isp_ops; 1775 ha->isp_ops = &qla24xx_isp_ops;
1659 } else if (IS_QLA25XX(ha)) { 1776 } else if (IS_QLA25XX(ha)) {
1660 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1777 ha->mbx_count = MAILBOX_REGISTER_COUNT;
@@ -1664,6 +1781,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1664 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1781 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1665 ha->gid_list_info_size = 8; 1782 ha->gid_list_info_size = 8;
1666 ha->optrom_size = OPTROM_SIZE_25XX; 1783 ha->optrom_size = OPTROM_SIZE_25XX;
1784 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1667 ha->isp_ops = &qla25xx_isp_ops; 1785 ha->isp_ops = &qla25xx_isp_ops;
1668 } 1786 }
1669 1787
@@ -1674,7 +1792,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1674 1792
1675 set_bit(0, (unsigned long *) ha->vp_idx_map); 1793 set_bit(0, (unsigned long *) ha->vp_idx_map);
1676 1794
1677 ret = qla2x00_mem_alloc(ha, req_length, rsp_length); 1795 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1678 if (!ret) { 1796 if (!ret) {
1679 qla_printk(KERN_WARNING, ha, 1797 qla_printk(KERN_WARNING, ha,
1680 "[ERROR] Failed to allocate memory for adapter\n"); 1798 "[ERROR] Failed to allocate memory for adapter\n");
@@ -1682,9 +1800,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1682 goto probe_hw_failed; 1800 goto probe_hw_failed;
1683 } 1801 }
1684 1802
1685 ha->req->max_q_depth = MAX_Q_DEPTH; 1803 req->max_q_depth = MAX_Q_DEPTH;
1686 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU) 1804 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1687 ha->req->max_q_depth = ql2xmaxqdepth; 1805 req->max_q_depth = ql2xmaxqdepth;
1806
1688 1807
1689 base_vha = qla2x00_create_host(sht, ha); 1808 base_vha = qla2x00_create_host(sht, ha);
1690 if (!base_vha) { 1809 if (!base_vha) {
@@ -1700,13 +1819,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1700 qla2x00_config_dma_addressing(base_vha); 1819 qla2x00_config_dma_addressing(base_vha);
1701 1820
1702 host = base_vha->host; 1821 host = base_vha->host;
1703 host->can_queue = ha->req->length + 128; 1822 base_vha->req_ques[0] = req->id;
1704 if (IS_QLA2XXX_MIDTYPE(ha)) { 1823 host->can_queue = req->length + 128;
1824 if (IS_QLA2XXX_MIDTYPE(ha))
1705 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1825 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1706 } else { 1826 else
1707 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + 1827 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1708 base_vha->vp_idx; 1828 base_vha->vp_idx;
1709 }
1710 if (IS_QLA2100(ha)) 1829 if (IS_QLA2100(ha))
1711 host->sg_tablesize = 32; 1830 host->sg_tablesize = 32;
1712 host->max_id = max_id; 1831 host->max_id = max_id;
@@ -1718,6 +1837,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1718 host->max_lun = MAX_LUNS; 1837 host->max_lun = MAX_LUNS;
1719 host->transportt = qla2xxx_transport_template; 1838 host->transportt = qla2xxx_transport_template;
1720 1839
1840 /* Set up the irqs */
1841 ret = qla2x00_request_irqs(ha, rsp);
1842 if (ret)
1843 goto probe_failed;
1844
1845 /* Alloc arrays of request and response ring ptrs */
1846 if (!qla2x00_alloc_queues(ha)) {
1847 qla_printk(KERN_WARNING, ha,
1848 "[ERROR] Failed to allocate memory for queue"
1849 " pointers\n");
1850 goto probe_failed;
1851 }
1852 ha->rsp_q_map[0] = rsp;
1853 ha->req_q_map[0] = req;
1854
1721 if (qla2x00_initialize_adapter(base_vha)) { 1855 if (qla2x00_initialize_adapter(base_vha)) {
1722 qla_printk(KERN_WARNING, ha, 1856 qla_printk(KERN_WARNING, ha,
1723 "Failed to initialize adapter\n"); 1857 "Failed to initialize adapter\n");
@@ -1730,11 +1864,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1730 goto probe_failed; 1864 goto probe_failed;
1731 } 1865 }
1732 1866
1733 /* Set up the irqs */
1734 ret = qla2x00_request_irqs(ha);
1735 if (ret)
1736 goto probe_failed;
1737
1738 /* 1867 /*
1739 * Startup the kernel thread for this host adapter 1868 * Startup the kernel thread for this host adapter
1740 */ 1869 */
@@ -1786,6 +1915,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1786 return 0; 1915 return 0;
1787 1916
1788probe_failed: 1917probe_failed:
1918 qla2x00_free_que(ha, req, rsp);
1789 qla2x00_free_device(base_vha); 1919 qla2x00_free_device(base_vha);
1790 1920
1791 scsi_host_put(base_vha->host); 1921 scsi_host_put(base_vha->host);
@@ -1836,6 +1966,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
1836 if (ha->iobase) 1966 if (ha->iobase)
1837 iounmap(ha->iobase); 1967 iounmap(ha->iobase);
1838 1968
1969 if (ha->mqiobase)
1970 iounmap(ha->mqiobase);
1971
1839 pci_release_selected_regions(ha->pdev, ha->bars); 1972 pci_release_selected_regions(ha->pdev, ha->bars);
1840 kfree(ha); 1973 kfree(ha);
1841 ha = NULL; 1974 ha = NULL;
@@ -1884,6 +2017,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
1884 qla2x00_free_irqs(vha); 2017 qla2x00_free_irqs(vha);
1885 2018
1886 qla2x00_mem_free(ha); 2019 qla2x00_mem_free(ha);
2020
2021 qla2x00_free_queues(ha);
1887} 2022}
1888 2023
1889static inline void 2024static inline void
@@ -1998,11 +2133,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1998* !0 = failure. 2133* !0 = failure.
1999*/ 2134*/
2000static int 2135static int
2001qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len) 2136qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2137 struct req_que **req, struct rsp_que **rsp)
2002{ 2138{
2003 char name[16]; 2139 char name[16];
2004 struct req_que *req = NULL;
2005 struct rsp_que *rsp = NULL;
2006 2140
2007 ha->init_cb_size = sizeof(init_cb_t); 2141 ha->init_cb_size = sizeof(init_cb_t);
2008 if (IS_QLA2XXX_MIDTYPE(ha)) 2142 if (IS_QLA2XXX_MIDTYPE(ha))
@@ -2055,52 +2189,67 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len)
2055 } 2189 }
2056 2190
2057 /* Allocate memory for request ring */ 2191 /* Allocate memory for request ring */
2058 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 2192 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2059 if (!req) { 2193 if (!*req) {
2060 DEBUG(printk("Unable to allocate memory for req\n")); 2194 DEBUG(printk("Unable to allocate memory for req\n"));
2061 goto fail_req; 2195 goto fail_req;
2062 } 2196 }
2063 ha->req = req; 2197 (*req)->length = req_len;
2064 req->length = req_len; 2198 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2065 req->ring = dma_alloc_coherent(&ha->pdev->dev, 2199 ((*req)->length + 1) * sizeof(request_t),
2066 (req->length + 1) * sizeof(request_t), 2200 &(*req)->dma, GFP_KERNEL);
2067 &req->dma, GFP_KERNEL); 2201 if (!(*req)->ring) {
2068 if (!req->ring) {
2069 DEBUG(printk("Unable to allocate memory for req_ring\n")); 2202 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2070 goto fail_req_ring; 2203 goto fail_req_ring;
2071 } 2204 }
2072 /* Allocate memory for response ring */ 2205 /* Allocate memory for response ring */
2073 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 2206 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2074 if (!rsp) { 2207 if (!*rsp) {
2075 DEBUG(printk("Unable to allocate memory for rsp\n")); 2208 qla_printk(KERN_WARNING, ha,
2209 "Unable to allocate memory for rsp\n");
2076 goto fail_rsp; 2210 goto fail_rsp;
2077 } 2211 }
2078 ha->rsp = rsp; 2212 (*rsp)->hw = ha;
2079 rsp->hw = ha; 2213 (*rsp)->length = rsp_len;
2080 rsp->length = rsp_len; 2214 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2081 2215 ((*rsp)->length + 1) * sizeof(response_t),
2082 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 2216 &(*rsp)->dma, GFP_KERNEL);
2083 (rsp->length + 1) * sizeof(response_t), 2217 if (!(*rsp)->ring) {
2084 &rsp->dma, GFP_KERNEL); 2218 qla_printk(KERN_WARNING, ha,
2085 if (!rsp->ring) { 2219 "Unable to allocate memory for rsp_ring\n");
2086 DEBUG(printk("Unable to allocate memory for rsp_ring\n"));
2087 goto fail_rsp_ring; 2220 goto fail_rsp_ring;
2088 } 2221 }
2222 (*req)->rsp = *rsp;
2223 (*rsp)->req = *req;
2224 /* Allocate memory for NVRAM data for vports */
2225 if (ha->nvram_npiv_size) {
2226 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2227 ha->nvram_npiv_size, GFP_KERNEL);
2228 if (!ha->npiv_info) {
2229 qla_printk(KERN_WARNING, ha,
2230 "Unable to allocate memory for npiv info\n");
2231 goto fail_npiv_info;
2232 }
2233 } else
2234 ha->npiv_info = NULL;
2089 2235
2090 INIT_LIST_HEAD(&ha->vp_list); 2236 INIT_LIST_HEAD(&ha->vp_list);
2091 return 1; 2237 return 1;
2092 2238
2239fail_npiv_info:
2240 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2241 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2242 (*rsp)->ring = NULL;
2243 (*rsp)->dma = 0;
2093fail_rsp_ring: 2244fail_rsp_ring:
2094 kfree(rsp); 2245 kfree(*rsp);
2095 ha->rsp = NULL;
2096fail_rsp: 2246fail_rsp:
2097 dma_free_coherent(&ha->pdev->dev, (req->length + 1) * 2247 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2098 sizeof(request_t), req->ring, req->dma); 2248 sizeof(request_t), (*req)->ring, (*req)->dma);
2099 req->ring = NULL; 2249 (*req)->ring = NULL;
2100 req->dma = 0; 2250 (*req)->dma = 0;
2101fail_req_ring: 2251fail_req_ring:
2102 kfree(req); 2252 kfree(*req);
2103 ha->req = NULL;
2104fail_req: 2253fail_req:
2105 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2254 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2106 ha->ct_sns, ha->ct_sns_dma); 2255 ha->ct_sns, ha->ct_sns_dma);
@@ -2144,9 +2293,6 @@ fail:
2144static void 2293static void
2145qla2x00_mem_free(struct qla_hw_data *ha) 2294qla2x00_mem_free(struct qla_hw_data *ha)
2146{ 2295{
2147 struct req_que *req = ha->req;
2148 struct rsp_que *rsp = ha->rsp;
2149
2150 if (ha->srb_mempool) 2296 if (ha->srb_mempool)
2151 mempool_destroy(ha->srb_mempool); 2297 mempool_destroy(ha->srb_mempool);
2152 2298
@@ -2189,6 +2335,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2189 ha->init_cb, ha->init_cb_dma); 2335 ha->init_cb, ha->init_cb_dma);
2190 vfree(ha->optrom_buffer); 2336 vfree(ha->optrom_buffer);
2191 kfree(ha->nvram); 2337 kfree(ha->nvram);
2338 kfree(ha->npiv_info);
2192 2339
2193 ha->srb_mempool = NULL; 2340 ha->srb_mempool = NULL;
2194 ha->eft = NULL; 2341 ha->eft = NULL;
@@ -2210,26 +2357,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
2210 ha->fw_dump = NULL; 2357 ha->fw_dump = NULL;
2211 ha->fw_dumped = 0; 2358 ha->fw_dumped = 0;
2212 ha->fw_dump_reading = 0; 2359 ha->fw_dump_reading = 0;
2213
2214 if (rsp) {
2215 if (rsp->ring)
2216 dma_free_coherent(&ha->pdev->dev,
2217 (rsp->length + 1) * sizeof(response_t),
2218 rsp->ring, rsp->dma);
2219
2220 kfree(rsp);
2221 rsp = NULL;
2222 }
2223
2224 if (req) {
2225 if (req->ring)
2226 dma_free_coherent(&ha->pdev->dev,
2227 (req->length + 1) * sizeof(request_t),
2228 req->ring, req->dma);
2229
2230 kfree(req);
2231 req = NULL;
2232 }
2233} 2360}
2234 2361
2235struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 2362struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -2613,9 +2740,8 @@ qla2x00_sp_free_dma(srb_t *sp)
2613} 2740}
2614 2741
2615void 2742void
2616qla2x00_sp_compl(scsi_qla_host_t *vha, srb_t *sp) 2743qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
2617{ 2744{
2618 struct qla_hw_data *ha = vha->hw;
2619 struct scsi_cmnd *cmd = sp->cmd; 2745 struct scsi_cmnd *cmd = sp->cmd;
2620 2746
2621 qla2x00_sp_free_dma(sp); 2747 qla2x00_sp_free_dma(sp);
@@ -2643,7 +2769,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
2643 srb_t *sp; 2769 srb_t *sp;
2644 int t; 2770 int t;
2645 struct qla_hw_data *ha = vha->hw; 2771 struct qla_hw_data *ha = vha->hw;
2646 struct req_que *req = ha->req; 2772 struct req_que *req;
2647 /* 2773 /*
2648 * Ports - Port down timer. 2774 * Ports - Port down timer.
2649 * 2775 *
@@ -2693,6 +2819,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
2693 if (!vha->vp_idx) { 2819 if (!vha->vp_idx) {
2694 spin_lock_irqsave(&ha->hardware_lock, 2820 spin_lock_irqsave(&ha->hardware_lock,
2695 cpu_flags); 2821 cpu_flags);
2822 req = ha->req_q_map[0];
2696 for (index = 1; 2823 for (index = 1;
2697 index < MAX_OUTSTANDING_COMMANDS; 2824 index < MAX_OUTSTANDING_COMMANDS;
2698 index++) { 2825 index++) {