aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/qla2xxx/qla_os.c
diff options
context:
space:
mode:
authorAnirban Chakraborty <anirban.chakraborty@qlogic.com>2009-04-07 01:33:40 -0400
committerJames Bottomley <James.Bottomley@HansenPartnership.com>2009-05-20 18:21:07 -0400
commit2afa19a9377ca61b9489e44bf50029574fbe63be (patch)
treecdfa3878eb04d833bbcd9ce92196bc4456b5ccf5 /drivers/scsi/qla2xxx/qla_os.c
parent7640335ea5b1a2da0d64303e6003012c619ae01a (diff)
[SCSI] qla2xxx: Add QoS support.
Set the number of request queues to the module parameter ql2xmaxqueues. Each vport gets a request queue. The QoS value set to the request queues determines priority control for queued IOs. If QoS value is not specified, the vports use the default queue 0. Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com> Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com> Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c135
1 files changed, 71 insertions, 64 deletions
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 29234ba42b42..e2647e02dac9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -187,7 +187,7 @@ static void qla2x00_sp_free_dma(srb_t *);
187/* -------------------------------------------------------------------------- */ 187/* -------------------------------------------------------------------------- */
188static int qla2x00_alloc_queues(struct qla_hw_data *ha) 188static int qla2x00_alloc_queues(struct qla_hw_data *ha)
189{ 189{
190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, 190 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
191 GFP_KERNEL); 191 GFP_KERNEL);
192 if (!ha->req_q_map) { 192 if (!ha->req_q_map) {
193 qla_printk(KERN_WARNING, ha, 193 qla_printk(KERN_WARNING, ha,
@@ -195,7 +195,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
195 goto fail_req_map; 195 goto fail_req_map;
196 } 196 }
197 197
198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, 198 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
199 GFP_KERNEL); 199 GFP_KERNEL);
200 if (!ha->rsp_q_map) { 200 if (!ha->rsp_q_map) {
201 qla_printk(KERN_WARNING, ha, 201 qla_printk(KERN_WARNING, ha,
@@ -213,16 +213,8 @@ fail_req_map:
213 return -ENOMEM; 213 return -ENOMEM;
214} 214}
215 215
216static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, 216static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
217 struct rsp_que *rsp)
218{ 217{
219 if (rsp && rsp->ring)
220 dma_free_coherent(&ha->pdev->dev,
221 (rsp->length + 1) * sizeof(response_t),
222 rsp->ring, rsp->dma);
223
224 kfree(rsp);
225 rsp = NULL;
226 if (req && req->ring) 218 if (req && req->ring)
227 dma_free_coherent(&ha->pdev->dev, 219 dma_free_coherent(&ha->pdev->dev,
228 (req->length + 1) * sizeof(request_t), 220 (req->length + 1) * sizeof(request_t),
@@ -232,22 +224,36 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
232 req = NULL; 224 req = NULL;
233} 225}
234 226
227static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
228{
229 if (rsp && rsp->ring)
230 dma_free_coherent(&ha->pdev->dev,
231 (rsp->length + 1) * sizeof(response_t),
232 rsp->ring, rsp->dma);
233
234 kfree(rsp);
235 rsp = NULL;
236}
237
235static void qla2x00_free_queues(struct qla_hw_data *ha) 238static void qla2x00_free_queues(struct qla_hw_data *ha)
236{ 239{
237 struct req_que *req; 240 struct req_que *req;
238 struct rsp_que *rsp; 241 struct rsp_que *rsp;
239 int cnt; 242 int cnt;
240 243
241 for (cnt = 0; cnt < ha->max_queues; cnt++) { 244 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
242 rsp = ha->rsp_q_map[cnt];
243 req = ha->req_q_map[cnt]; 245 req = ha->req_q_map[cnt];
244 qla2x00_free_que(ha, req, rsp); 246 qla2x00_free_req_que(ha, req);
245 } 247 }
246 kfree(ha->rsp_q_map);
247 ha->rsp_q_map = NULL;
248
249 kfree(ha->req_q_map); 248 kfree(ha->req_q_map);
250 ha->req_q_map = NULL; 249 ha->req_q_map = NULL;
250
251 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
252 rsp = ha->rsp_q_map[cnt];
253 qla2x00_free_rsp_que(ha, rsp);
254 }
255 kfree(ha->rsp_q_map);
256 ha->rsp_q_map = NULL;
251} 257}
252 258
253static char * 259static char *
@@ -612,7 +618,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
612void 618void
613qla2x00_abort_fcport_cmds(fc_port_t *fcport) 619qla2x00_abort_fcport_cmds(fc_port_t *fcport)
614{ 620{
615 int cnt, que, id; 621 int cnt;
616 unsigned long flags; 622 unsigned long flags;
617 srb_t *sp; 623 srb_t *sp;
618 scsi_qla_host_t *vha = fcport->vha; 624 scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +626,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
620 struct req_que *req; 626 struct req_que *req;
621 627
622 spin_lock_irqsave(&ha->hardware_lock, flags); 628 spin_lock_irqsave(&ha->hardware_lock, flags);
623 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 629 req = vha->req;
624 id = vha->req_ques[que]; 630 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
625 req = ha->req_q_map[id]; 631 sp = req->outstanding_cmds[cnt];
626 if (!req) 632 if (!sp)
633 continue;
634 if (sp->fcport != fcport)
627 continue; 635 continue;
628 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
629 sp = req->outstanding_cmds[cnt];
630 if (!sp)
631 continue;
632 if (sp->fcport != fcport)
633 continue;
634 636
635 spin_unlock_irqrestore(&ha->hardware_lock, flags); 637 spin_unlock_irqrestore(&ha->hardware_lock, flags);
636 if (ha->isp_ops->abort_command(vha, sp, req)) { 638 if (ha->isp_ops->abort_command(sp)) {
639 DEBUG2(qla_printk(KERN_WARNING, ha,
640 "Abort failed -- %lx\n",
641 sp->cmd->serial_number));
642 } else {
643 if (qla2x00_eh_wait_on_command(sp->cmd) !=
644 QLA_SUCCESS)
637 DEBUG2(qla_printk(KERN_WARNING, ha, 645 DEBUG2(qla_printk(KERN_WARNING, ha,
638 "Abort failed -- %lx\n", 646 "Abort failed while waiting -- %lx\n",
639 sp->cmd->serial_number)); 647 sp->cmd->serial_number));
640 } else {
641 if (qla2x00_eh_wait_on_command(sp->cmd) !=
642 QLA_SUCCESS)
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed while waiting -- %lx\n",
645 sp->cmd->serial_number));
646 }
647 spin_lock_irqsave(&ha->hardware_lock, flags);
648 } 648 }
649 spin_lock_irqsave(&ha->hardware_lock, flags);
649 } 650 }
650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 651 spin_unlock_irqrestore(&ha->hardware_lock, flags);
651} 652}
@@ -726,7 +727,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
726 " pid=%ld.\n", __func__, vha->host_no, sp, serial)); 727 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
727 728
728 spin_unlock_irqrestore(&ha->hardware_lock, flags); 729 spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 if (ha->isp_ops->abort_command(vha, sp, req)) { 730 if (ha->isp_ops->abort_command(sp)) {
730 DEBUG2(printk("%s(%ld): abort_command " 731 DEBUG2(printk("%s(%ld): abort_command "
731 "mbx failed.\n", __func__, vha->host_no)); 732 "mbx failed.\n", __func__, vha->host_no));
732 ret = FAILED; 733 ret = FAILED;
@@ -820,7 +821,7 @@ static char *reset_errors[] = {
820 821
821static int 822static int
822__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 823__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
823 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 824 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
824{ 825{
825 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 826 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
826 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 827 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +842,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
841 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) 842 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
842 goto eh_reset_failed; 843 goto eh_reset_failed;
843 err = 2; 844 err = 2;
844 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 845 if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
846 != QLA_SUCCESS)
845 goto eh_reset_failed; 847 goto eh_reset_failed;
846 err = 3; 848 err = 3;
847 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 849 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -1065,7 +1067,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1065 if (fcport->port_type != FCT_TARGET) 1067 if (fcport->port_type != FCT_TARGET)
1066 continue; 1068 continue;
1067 1069
1068 ret = ha->isp_ops->target_reset(fcport, 0); 1070 ret = ha->isp_ops->target_reset(fcport, 0, 0);
1069 if (ret != QLA_SUCCESS) { 1071 if (ret != QLA_SUCCESS) {
1070 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1072 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1071 "target_reset=%d d_id=%x.\n", __func__, 1073 "target_reset=%d d_id=%x.\n", __func__,
@@ -1089,7 +1091,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1089 struct req_que *req; 1091 struct req_que *req;
1090 1092
1091 spin_lock_irqsave(&ha->hardware_lock, flags); 1093 spin_lock_irqsave(&ha->hardware_lock, flags);
1092 for (que = 0; que < ha->max_queues; que++) { 1094 for (que = 0; que < ha->max_req_queues; que++) {
1093 req = ha->req_q_map[que]; 1095 req = ha->req_q_map[que];
1094 if (!req) 1096 if (!req)
1095 continue; 1097 continue;
@@ -1124,7 +1126,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1124 scsi_qla_host_t *vha = shost_priv(sdev->host); 1126 scsi_qla_host_t *vha = shost_priv(sdev->host);
1125 struct qla_hw_data *ha = vha->hw; 1127 struct qla_hw_data *ha = vha->hw;
1126 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1128 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1127 struct req_que *req = ha->req_q_map[vha->req_ques[0]]; 1129 struct req_que *req = vha->req;
1128 1130
1129 if (sdev->tagged_supported) 1131 if (sdev->tagged_supported)
1130 scsi_activate_tcq(sdev, req->max_q_depth); 1132 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1572,8 +1574,9 @@ skip_pio:
1572 } 1574 }
1573 1575
1574 /* Determine queue resources */ 1576 /* Determine queue resources */
1575 ha->max_queues = 1; 1577 ha->max_req_queues = ha->max_rsp_queues = 1;
1576 if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) 1578 if (ql2xmaxqueues <= 1 &&
1579 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
1577 goto mqiobase_exit; 1580 goto mqiobase_exit;
1578 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), 1581 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1579 pci_resource_len(ha->pdev, 3)); 1582 pci_resource_len(ha->pdev, 3));
@@ -1581,20 +1584,17 @@ skip_pio:
1581 /* Read MSIX vector size of the board */ 1584 /* Read MSIX vector size of the board */
1582 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); 1585 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1583 ha->msix_count = msix; 1586 ha->msix_count = msix;
1584 /* Max queues are bounded by available msix vectors */ 1587 if (ql2xmaxqueues > 1) {
1585 /* queue 0 uses two msix vectors */ 1588 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1586 if (ha->msix_count - 1 < ql2xmaxqueues) 1589 QLA_MQ_SIZE : ql2xmaxqueues;
1587 ha->max_queues = ha->msix_count - 1; 1590 DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
1588 else if (ql2xmaxqueues > QLA_MQ_SIZE) 1591 " of request queues:%d\n", ha->max_req_queues));
1589 ha->max_queues = QLA_MQ_SIZE; 1592 }
1590 else 1593 } else
1591 ha->max_queues = ql2xmaxqueues; 1594 qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
1592 qla_printk(KERN_INFO, ha,
1593 "MSI-X vector count: %d\n", msix);
1594 }
1595 1595
1596mqiobase_exit: 1596mqiobase_exit:
1597 ha->msix_count = ha->max_queues + 1; 1597 ha->msix_count = ha->max_rsp_queues + 1;
1598 return (0); 1598 return (0);
1599 1599
1600iospace_error_exit: 1600iospace_error_exit:
@@ -1804,14 +1804,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1804 1804
1805 ret = -ENOMEM; 1805 ret = -ENOMEM;
1806 qla2x00_mem_free(ha); 1806 qla2x00_mem_free(ha);
1807 qla2x00_free_que(ha, req, rsp); 1807 qla2x00_free_req_que(ha, req);
1808 qla2x00_free_rsp_que(ha, rsp);
1808 goto probe_hw_failed; 1809 goto probe_hw_failed;
1809 } 1810 }
1810 1811
1811 pci_set_drvdata(pdev, base_vha); 1812 pci_set_drvdata(pdev, base_vha);
1812 1813
1813 host = base_vha->host; 1814 host = base_vha->host;
1814 base_vha->req_ques[0] = req->id; 1815 base_vha->req = req;
1815 host->can_queue = req->length + 128; 1816 host->can_queue = req->length + 128;
1816 if (IS_QLA2XXX_MIDTYPE(ha)) 1817 if (IS_QLA2XXX_MIDTYPE(ha))
1817 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 1818 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1842,7 +1843,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1842 } 1843 }
1843 ha->rsp_q_map[0] = rsp; 1844 ha->rsp_q_map[0] = rsp;
1844 ha->req_q_map[0] = req; 1845 ha->req_q_map[0] = req;
1845 1846 rsp->req = req;
1847 req->rsp = rsp;
1848 set_bit(0, ha->req_qid_map);
1849 set_bit(0, ha->rsp_qid_map);
1846 /* FWI2-capable only. */ 1850 /* FWI2-capable only. */
1847 req->req_q_in = &ha->iobase->isp24.req_q_in; 1851 req->req_q_in = &ha->iobase->isp24.req_q_in;
1848 req->req_q_out = &ha->iobase->isp24.req_q_out; 1852 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1918,8 +1922,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1918 return 0; 1922 return 0;
1919 1923
1920probe_init_failed: 1924probe_init_failed:
1921 qla2x00_free_que(ha, req, rsp); 1925 qla2x00_free_req_que(ha, req);
1922 ha->max_queues = 0; 1926 qla2x00_free_rsp_que(ha, rsp);
1927 ha->max_req_queues = ha->max_rsp_queues = 0;
1923 1928
1924probe_failed: 1929probe_failed:
1925 if (base_vha->timer_active) 1930 if (base_vha->timer_active)
@@ -2018,6 +2023,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2018{ 2023{
2019 struct qla_hw_data *ha = vha->hw; 2024 struct qla_hw_data *ha = vha->hw;
2020 2025
2026 qla25xx_delete_queues(vha);
2027
2021 if (ha->flags.fce_enabled) 2028 if (ha->flags.fce_enabled)
2022 qla2x00_disable_fce_trace(vha, NULL, NULL); 2029 qla2x00_disable_fce_trace(vha, NULL, NULL);
2023 2030