Diffstat (limited to 'drivers/scsi/qla2xxx/qla_os.c')
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c	94
1 file changed, 59 insertions(+), 35 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48eaa3bb5433..7c8af7ed2a5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -27,7 +27,6 @@ char qla2x00_version_str[40];
  */
 static struct kmem_cache *srb_cachep;
 
-int num_hosts;
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -87,6 +86,13 @@ MODULE_PARM_DESC(ql2xqfullrampup,
87 "depth for a device after a queue-full condition has been " 86 "depth for a device after a queue-full condition has been "
88 "detected. Default is 120 seconds."); 87 "detected. Default is 120 seconds.");
89 88
89int ql2xiidmaenable=1;
90module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
91MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94
95
90/* 96/*
91 * SCSI host template entry points 97 * SCSI host template entry points
92 */ 98 */
@@ -388,7 +394,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	}
 
 	/* Close window on fcport/rport state-transitioning. */
-	if (!*(fc_port_t **)rport->dd_data) {
+	if (fcport->drport) {
 		cmd->result = DID_IMM_RETRY << 16;
 		goto qc_fail_command;
 	}
@@ -443,7 +449,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	int rval;
 	scsi_qla_host_t *pha = to_qla_parent(ha);
 
-	if (unlikely(pci_channel_offline(ha->pdev))) {
+	if (unlikely(pci_channel_offline(pha->pdev))) {
 		cmd->result = DID_REQUEUE << 16;
 		goto qc24_fail_command;
 	}
@@ -455,7 +461,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 	}
 
 	/* Close window on fcport/rport state-transitioning. */
-	if (!*(fc_port_t **)rport->dd_data) {
+	if (fcport->drport) {
 		cmd->result = DID_IMM_RETRY << 16;
 		goto qc24_fail_command;
 	}
@@ -617,6 +623,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
 	return (return_status);
 }
 
+void
+qla2x00_abort_fcport_cmds(fc_port_t *fcport)
+{
+	int cnt;
+	unsigned long flags;
+	srb_t *sp;
+	scsi_qla_host_t *ha = fcport->ha;
+	scsi_qla_host_t *pha = to_qla_parent(ha);
+
+	spin_lock_irqsave(&pha->hardware_lock, flags);
+	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+		sp = pha->outstanding_cmds[cnt];
+		if (!sp)
+			continue;
+		if (sp->fcport != fcport)
+			continue;
+
+		spin_unlock_irqrestore(&pha->hardware_lock, flags);
+		if (ha->isp_ops->abort_command(ha, sp)) {
+			DEBUG2(qla_printk(KERN_WARNING, ha,
+			    "Abort failed -- %lx\n", sp->cmd->serial_number));
+		} else {
+			if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
+			    QLA_SUCCESS)
+				DEBUG2(qla_printk(KERN_WARNING, ha,
+				    "Abort failed while waiting -- %lx\n",
+				    sp->cmd->serial_number));
+
+		}
+		spin_lock_irqsave(&pha->hardware_lock, flags);
+	}
+	spin_unlock_irqrestore(&pha->hardware_lock, flags);
+}
+
 static void
 qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
 {
@@ -1073,7 +1113,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
 	else
 		scsi_deactivate_tcq(sdev, ha->max_q_depth);
 
-	rport->dev_loss_tmo = ha->port_down_retry_count + 5;
+	rport->dev_loss_tmo = ha->port_down_retry_count;
 
 	return 0;
 }
@@ -1629,9 +1669,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	host->can_queue = ha->request_q_length + 128;
 
-	/* load the F/W, read paramaters, and init the H/W */
-	ha->instance = num_hosts;
-
 	mutex_init(&ha->vport_lock);
 	init_completion(&ha->mbx_cmd_comp);
 	complete(&ha->mbx_cmd_comp);
@@ -1679,7 +1716,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	host->this_id = 255;
 	host->cmd_per_lun = 3;
-	host->unique_id = ha->instance;
+	host->unique_id = host->host_no;
 	host->max_cmd_len = MAX_CMDSZ;
 	host->max_channel = MAX_BUSES - 1;
 	host->max_lun = MAX_LUNS;
@@ -1700,8 +1737,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	ha->flags.init_done = 1;
 	ha->flags.online = 1;
 
-	num_hosts++;
-
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
@@ -1813,27 +1848,21 @@ static inline void
 qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
     int defer)
 {
-	unsigned long flags;
 	struct fc_rport *rport;
+	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	if (!fcport->rport)
 		return;
 
 	rport = fcport->rport;
 	if (defer) {
-		spin_lock_irqsave(&fcport->rport_lock, flags);
+		spin_lock_irq(ha->host->host_lock);
 		fcport->drport = rport;
-		fcport->rport = NULL;
-		*(fc_port_t **)rport->dd_data = NULL;
-		spin_unlock_irqrestore(&fcport->rport_lock, flags);
-		set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
-	} else {
-		spin_lock_irqsave(&fcport->rport_lock, flags);
-		fcport->rport = NULL;
-		*(fc_port_t **)rport->dd_data = NULL;
-		spin_unlock_irqrestore(&fcport->rport_lock, flags);
+		spin_unlock_irq(ha->host->host_lock);
+		set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
+		qla2xxx_wake_dpc(pha);
+	} else
 		fc_remote_port_delete(rport);
-	}
 }
 
 /*
@@ -1903,7 +1932,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
 	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	list_for_each_entry(fcport, &pha->fcports, list) {
-		if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx)
+		if (ha->vp_idx != fcport->vp_idx)
 			continue;
 		/*
 		 * No point in marking the device as lost, if the device is
@@ -1911,17 +1940,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
 		 */
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
 			continue;
-		if (atomic_read(&fcport->state) == FCS_ONLINE) {
-			if (defer)
-				qla2x00_schedule_rport_del(ha, fcport, defer);
-			else if (ha->vp_idx == fcport->vp_idx)
-				qla2x00_schedule_rport_del(ha, fcport, defer);
-		}
+		if (atomic_read(&fcport->state) == FCS_ONLINE)
+			qla2x00_schedule_rport_del(ha, fcport, defer);
 		atomic_set(&fcport->state, FCS_DEVICE_LOST);
 	}
-
-	if (defer)
-		qla2xxx_wake_dpc(ha);
 }
 
 /*
@@ -2156,7 +2178,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
 static int
 qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
 {
-	unsigned long flags;
+	unsigned long uninitialized_var(flags);
 	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	if (!locked)
@@ -2313,8 +2335,10 @@ qla2x00_do_dpc(void *data)
 			    ha->host_no));
 		}
 
-		if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags))
+		if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
 			qla2x00_update_fcports(ha);
+			clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
+		}
 
 		if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
 		    (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {