-rw-r--r--  Documentation/kernel-parameters.txt | 3
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 10
-rw-r--r--  Documentation/scsi/LICENSE.qla2xxx | 2
-rw-r--r--  Documentation/scsi/LICENSE.qla4xxx | 2
-rw-r--r--  Documentation/scsi/st.txt | 6
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  drivers/ata/libata-core.c | 69
-rw-r--r--  drivers/ata/libata-eh.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.c | 18
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 80
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 22
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 59
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 16
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 18
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 36
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 160
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 27
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 355
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 890
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 40
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 314
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 11
-rw-r--r--  drivers/scsi/bfa/bfa_core.c | 21
-rw-r--r--  drivers/scsi/bfa/bfa_cs.h | 4
-rw-r--r--  drivers/scsi/bfa/bfa_defs_fcs.h | 18
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 10
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.c | 21
-rw-r--r--  drivers/scsi/bfa/bfa_fcbuild.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 12
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.c | 182
-rw-r--r--  drivers/scsi/bfa/bfa_fcs.h | 66
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_fcpim.c | 129
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_lport.c | 632
-rw-r--r--  drivers/scsi/bfa/bfa_fcs_rport.c | 466
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.c | 24
-rw-r--r--  drivers/scsi/bfa/bfa_ioc.h | 2
-rw-r--r--  drivers/scsi/bfa/bfa_modules.h | 1
-rw-r--r--  drivers/scsi/bfa/bfa_svc.c | 95
-rw-r--r--  drivers/scsi/bfa/bfa_svc.h | 23
-rw-r--r--  drivers/scsi/bfa/bfad.c | 236
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 46
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 39
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.h | 2
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 5
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 9
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 2
-rw-r--r--  drivers/scsi/constants.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 3
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 26
-rw-r--r--  drivers/scsi/hpsa.c | 39
-rw-r--r--  drivers/scsi/ibmvscsi/Makefile | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 36
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 352
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 22
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 368
-rw-r--r--  drivers/scsi/ipr.c | 168
-rw-r--r--  drivers/scsi/ipr.h | 5
-rw-r--r--  drivers/scsi/isci/host.c | 24
-rw-r--r--  drivers/scsi/isci/host.h | 2
-rw-r--r--  drivers/scsi/isci/init.c | 59
-rw-r--r--  drivers/scsi/isci/phy.c | 4
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 1
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 91
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 69
-rw-r--r--  drivers/scsi/libsas/sas_dump.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 90
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 1
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 21
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 52
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 76
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 494
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 72
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 186
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 98
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 42
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 839
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 38
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 127
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 144
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 621
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 42
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 63
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 39
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 12
-rw-r--r--  drivers/scsi/mpt2sas/Kconfig | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2.h | 14
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_init.h | 9
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 8
-rw-r--r--  drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 7
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 14
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 38
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 80
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_debug.h | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 59
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 147
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 376
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 30
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 166
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 49
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 39
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 565
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 261
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 437
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 224
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 151
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 770
-rw-r--r--  drivers/scsi/qla2xxx/qla_settings.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 67
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/Kconfig | 4
-rw-r--r--  drivers/scsi/qla4xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c | 1611
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.h | 283
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c | 26
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.c | 32
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 65
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 59
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 94
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_inline.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 28
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 406
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 186
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 1432
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h | 198
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 494
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 4
-rw-r--r--  drivers/scsi/scsi_debug.c | 30
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 30
-rw-r--r--  drivers/scsi/sd.c | 80
-rw-r--r--  drivers/scsi/sd.h | 2
-rw-r--r--  drivers/scsi/sd_dif.c | 25
-rw-r--r--  drivers/scsi/st.c | 416
-rw-r--r--  drivers/scsi/st.h | 5
-rw-r--r--  include/linux/libata.h | 15
-rw-r--r--  include/linux/pci_ids.h | 1
-rw-r--r--  include/scsi/libsas.h | 20
-rw-r--r--  include/scsi/sas_ata.h | 10
-rw-r--r--  include/scsi/scsi_bsg_fc.h | 2
-rw-r--r--  include/scsi/scsi_device.h | 4
-rw-r--r--  include/scsi/scsi_host.h | 6
179 files changed, 13464 insertions(+), 4430 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index df43807bb5d..aab409f335b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1350,6 +1350,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			* nohrst, nosrst, norst: suppress hard, soft
 			  and both resets.
 
+			* rstonce: only attempt one reset during
+			  hot-unplug link recovery
+
 			* dump_id: dump IDENTIFY data.
 
 			If there are multiple matching configurations changing
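
Note: rstonce slots into the existing libata.force grammar, so it combines with the other per-link flags; for example, booting with libata.force=1:rstonce (the port ID here is illustrative) would cap that link at a single reset attempt during recovery, which the libata-eh.c hunk further down implements by clamping max_tries to 1.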
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 80441ab608e..3a3079411a3 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date : Tue. Jun 17, 2012 17:00:00 PST 2012 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford/Kashyap Desai
+Current Version : 00.00.06.18-rc1
+Old Version : 00.00.06.15-rc1
+    1. Fix Copyright dates.
+    2. Add throttlequeuedepth module parameter.
+    3. Add resetwaittime module parameter.
+    4. Move poll_aen_lock initializer.
+-------------------------------------------------------------------------------
 Release Date : Mon. Mar 19, 2012 17:00:00 PST 2012 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index ce0fdf349a8..27a91cf43d6 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2011 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
 QLogic Linux FC-FCoE Driver
 
 This program includes a device driver for Linux 3.x.
diff --git a/Documentation/scsi/LICENSE.qla4xxx b/Documentation/scsi/LICENSE.qla4xxx
index ab899591ecb..78c169f0d7c 100644
--- a/Documentation/scsi/LICENSE.qla4xxx
+++ b/Documentation/scsi/LICENSE.qla4xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2011 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
 QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 685bf3582ab..f346abbdd6f 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -112,10 +112,8 @@ attempted).
 
 MINOR NUMBERS
 
-The tape driver currently supports 128 drives by default. This number
-can be increased by editing st.h and recompiling the driver if
-necessary. The upper limit is 2^17 drives if 4 modes for each drive
-are used.
+The tape driver currently supports up to 2^17 drives if 4 modes for
+each drive are used.
 
 The minor numbers consist of the following bit fields:
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 78336396a43..cb009e4b229 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1650,7 +1650,6 @@ F: drivers/bcma/
 F:	include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:	Jing Huang <huangj@brocade.com>
 M:	Krishna C Gudipati <kgudipat@brocade.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
@@ -3438,6 +3437,13 @@ L: netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/ibm/ibmveth.*
 
+IBM Power Virtual SCSI/FC Device Drivers
+M:	Robert Jennings <rcj@linux.vnet.ibm.com>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/ibmvscsi/
+X:	drivers/scsi/ibmvscsi/ibmvstgt.c
+
 IBM ServeRAID RAID DRIVER
 P:	Jack Hammer
 M:	Dave Jeffery <ipslinux@adaptec.com>
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3492aa73d3a..3cc7096cfda 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5273,16 +5273,20 @@ bool ata_link_offline(struct ata_link *link)
 #ifdef CONFIG_PM
 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
 			       unsigned int action, unsigned int ehi_flags,
-			       int wait)
+			       int *async)
 {
 	struct ata_link *link;
 	unsigned long flags;
-	int rc;
+	int rc = 0;
 
 	/* Previous resume operation might still be in
 	 * progress.  Wait for PM_PENDING to clear.
 	 */
 	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+		if (async) {
+			*async = -EAGAIN;
+			return 0;
+		}
 		ata_port_wait_eh(ap);
 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
 	}
@@ -5291,10 +5295,10 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
 	spin_lock_irqsave(ap->lock, flags);
 
 	ap->pm_mesg = mesg;
-	if (wait) {
-		rc = 0;
+	if (async)
+		ap->pm_result = async;
+	else
 		ap->pm_result = &rc;
-	}
 
 	ap->pflags |= ATA_PFLAG_PM_PENDING;
 	ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5307,7 +5311,7 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
 	spin_unlock_irqrestore(ap->lock, flags);
 
 	/* wait and check result */
-	if (wait) {
+	if (!async) {
 		ata_port_wait_eh(ap);
 		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
 	}
@@ -5315,9 +5319,8 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
 	return rc;
 }
 
-static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
 {
-	struct ata_port *ap = to_ata_port(dev);
 	unsigned int ehi_flags = ATA_EHI_QUIET;
 	int rc;
 
@@ -5332,10 +5335,17 @@ static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
 	if (mesg.event == PM_EVENT_SUSPEND)
 		ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
 
-	rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
+	rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
 	return rc;
 }
 
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+{
+	struct ata_port *ap = to_ata_port(dev);
+
+	return __ata_port_suspend_common(ap, mesg, NULL);
+}
+
 static int ata_port_suspend(struct device *dev)
 {
 	if (pm_runtime_suspended(dev))
@@ -5360,16 +5370,22 @@ static int ata_port_poweroff(struct device *dev)
 	return ata_port_suspend_common(dev, PMSG_HIBERNATE);
 }
 
-static int ata_port_resume_common(struct device *dev)
+static int __ata_port_resume_common(struct ata_port *ap, int *async)
 {
-	struct ata_port *ap = to_ata_port(dev);
 	int rc;
 
 	rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
-		ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+		ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
 	return rc;
 }
 
+static int ata_port_resume_common(struct device *dev)
+{
+	struct ata_port *ap = to_ata_port(dev);
+
+	return __ata_port_resume_common(ap, NULL);
+}
+
 static int ata_port_resume(struct device *dev)
 {
 	int rc;
@@ -5402,6 +5418,24 @@ static const struct dev_pm_ops ata_port_pm_ops = {
 	.runtime_idle = ata_port_runtime_idle,
 };
 
+/* sas ports don't participate in pm runtime management of ata_ports,
+ * and need to resume ata devices at the domain level, not the per-port
+ * level. sas suspend/resume is async to allow parallel port recovery
+ * since sas has multiple ata_port instances per Scsi_Host.
+ */
+int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+{
+	return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
+
+int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+{
+	return __ata_port_resume_common(ap, async);
+}
+EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
+
+
 /**
  *	ata_host_suspend - suspend host
  *	@host: host to suspend
@@ -5947,24 +5981,18 @@ int ata_host_start(struct ata_host *host)
 }
 
 /**
- *	ata_sas_host_init - Initialize a host struct
+ *	ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
  *	@host:	host to initialize
  *	@dev:	device host is attached to
- *	@flags:	host flags
  *	@ops:	port_ops
  *
- *	LOCKING:
- *	PCI/etc. bus probe sem.
- *
  */
-/* KILLME - the only user left is ipr */
 void ata_host_init(struct ata_host *host, struct device *dev,
-		   unsigned long flags, struct ata_port_operations *ops)
+		   struct ata_port_operations *ops)
 {
 	spin_lock_init(&host->lock);
 	mutex_init(&host->eh_mutex);
 	host->dev = dev;
-	host->flags = flags;
 	host->ops = ops;
 }
 
@@ -6408,6 +6436,7 @@ static int __init ata_parse_force_one(char **cur,
6408 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6436 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6409 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6437 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6410 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6438 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6439 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6411 }; 6440 };
6412 char *start = *cur, *p = *cur; 6441 char *start = *cur, *p = *cur;
6413 char *id, *val, *endp; 6442 char *id, *val, *endp;
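
Note: the new *async contract deserves a gloss: with a non-NULL cell, ata_port_request_pm() stores the pointer in ap->pm_result and returns without waiting, reporting a still-pending previous operation as *async = -EAGAIN; EH writes the final result through the same pointer when recovery completes, so the cell must outlive the call. A minimal caller sketch under those assumptions (my_sas_port and its fields are illustrative, not part of the patch):

    /* hypothetical wrapper; EH completion writes pm_result later,
     * so it must live at least as long as the request */
    struct my_sas_port {
            struct ata_port *ap;
            int pm_result;
    };

    static void my_host_suspend(struct my_sas_port *ports, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    ports[i].pm_result = 0;
                    /* queues EH-driven suspend and returns immediately,
                     * letting all ports of the host recover in parallel */
                    ata_sas_port_async_suspend(ports[i].ap,
                                               &ports[i].pm_result);
            }
            /* callers check pm_result later; -EAGAIN means a previous
             * operation was still pending and the request must be retried */
    }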
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 26598941e1b..e60437cd0d1 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2625,6 +2625,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
 	 */
 	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
 		max_tries++;
+	if (link->flags & ATA_LFLAG_RST_ONCE)
+		max_tries = 1;
 	if (link->flags & ATA_LFLAG_NO_HRST)
 		hardreset = NULL;
 	if (link->flags & ATA_LFLAG_NO_SRST)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index d99db5623ac..fb69baa06ca 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1666,7 +1666,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 	if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
 		printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
 		    "MEM failed\n", ioc->name);
-		return r;
+		goto out_pci_disable_device;
 	}
 
 	if (sizeof(dma_addr_t) > 4) {
@@ -1690,8 +1690,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 		} else {
 			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
 			    ioc->name, pci_name(pdev));
-			pci_release_selected_regions(pdev, ioc->bars);
-			return r;
+			goto out_pci_release_region;
 		}
 	} else {
 		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
@@ -1704,8 +1703,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 		} else {
 			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
 			    ioc->name, pci_name(pdev));
-			pci_release_selected_regions(pdev, ioc->bars);
-			return r;
+			goto out_pci_release_region;
 		}
 	}
 
@@ -1735,8 +1733,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 	if (mem == NULL) {
 		printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
 			" memory!\n", ioc->name);
-		pci_release_selected_regions(pdev, ioc->bars);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out_pci_release_region;
 	}
 	ioc->memmap = mem;
 	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
@@ -1750,6 +1748,12 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 	ioc->pio_chip = (SYSIF_REGS __iomem *)port;
 
 	return 0;
+
+out_pci_release_region:
+	pci_release_selected_regions(pdev, ioc->bars);
+out_pci_disable_device:
+	pci_disable_device(pdev);
+	return r;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
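
Note: besides de-duplicating the unwind code, the rework fixes a real leak: the old early returns released the regions but never disabled the device on failure. The resulting shape is the standard centralized-unwind idiom, sketched here with generic names (not mpt-specific):

    static int probe_sketch(struct pci_dev *pdev)
    {
            int r;

            r = pci_enable_device_mem(pdev);
            if (r)
                    return r;

            r = pci_request_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM), "drv");
            if (r)
                    goto out_disable;

            /* each later failure jumps to the deepest label it needs,
             * so every acquired resource is released exactly once */
            return 0;

    out_disable:
            pci_disable_device(pdev);
            return r;
    }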
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index aff8621de80..f6adde44f22 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	rwlock_init(&port->unit_list_lock);
 	INIT_LIST_HEAD(&port->unit_list);
+	atomic_set(&port->units, 0);
 
 	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
 	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index e37f0455194..f2dd3a0a39e 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -39,19 +39,25 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
 	spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
 }
 
-static int zfcp_ccw_activate(struct ccw_device *cdev)
-
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
 {
 	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
 	if (!adapter)
 		return 0;
 
+	zfcp_erp_clear_adapter_status(adapter, clear);
 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"ccresu2");
+				tag);
 	zfcp_erp_wait(adapter);
-	flush_work(&adapter->scan_work);
+	flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
 
 	zfcp_ccw_adapter_put(adapter);
 
@@ -164,26 +170,34 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
 	BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
 	adapter->req_no = 0;
 
-	zfcp_ccw_activate(cdev);
+	zfcp_ccw_activate(cdev, 0, "ccsonl1");
+	/* scan for remote ports
+	   either at the end of any successful adapter recovery
+	   or only after the adapter recovery for setting a device online */
+	zfcp_fc_inverse_conditional_port_scan(adapter);
+	flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
 	zfcp_ccw_adapter_put(adapter);
 	return 0;
 }
 
 /**
- * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
  * @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
  *
  * This function gets called by the common i/o layer and sets an adapter
  * into state offline.
  */
-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
 {
 	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
 	if (!adapter)
 		return 0;
 
-	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
+	zfcp_erp_set_adapter_status(adapter, set);
+	zfcp_erp_adapter_shutdown(adapter, 0, tag);
 	zfcp_erp_wait(adapter);
 
 	zfcp_ccw_adapter_put(adapter);
@@ -191,6 +205,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
191} 205}
192 206
193/** 207/**
208 * zfcp_ccw_set_offline - set_offline function of zfcp driver
209 * @cdev: pointer to belonging ccw device
210 *
211 * This function gets called by the common i/o layer and sets an adapter
212 * into state offline.
213 */
214static int zfcp_ccw_set_offline(struct ccw_device *cdev)
215{
216 return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
217}
218
219/**
194 * zfcp_ccw_notify - ccw notify function 220 * zfcp_ccw_notify - ccw notify function
195 * @cdev: pointer to belonging ccw device 221 * @cdev: pointer to belonging ccw device
196 * @event: indicates if adapter was detached or attached 222 * @event: indicates if adapter was detached or attached
@@ -207,6 +233,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
207 233
208 switch (event) { 234 switch (event) {
209 case CIO_GONE: 235 case CIO_GONE:
236 if (atomic_read(&adapter->status) &
237 ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
238 zfcp_dbf_hba_basic("ccnigo1", adapter);
239 break;
240 }
210 dev_warn(&cdev->dev, "The FCP device has been detached\n"); 241 dev_warn(&cdev->dev, "The FCP device has been detached\n");
211 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); 242 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
212 break; 243 break;
@@ -216,6 +247,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 		zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
 		break;
 	case CIO_OPER:
+		if (atomic_read(&adapter->status) &
+		    ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+			zfcp_dbf_hba_basic("ccniop1", adapter);
+			break;
+		}
 		dev_info(&cdev->dev, "The FCP device is operational again\n");
 		zfcp_erp_set_adapter_status(adapter,
 					    ZFCP_STATUS_COMMON_RUNNING);
@@ -251,6 +287,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 	zfcp_ccw_adapter_put(adapter);
 }
 
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+	zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+	return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+	/* trace records for thaw and final shutdown during suspend
+	   can only be found in system dump until the end of suspend
+	   but not after resume because it's based on the memory image
+	   right after the very first suspend (freeze) callback */
+	zfcp_ccw_activate(cdev, 0, "ccthaw1");
+	return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+	zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+	return 0;
+}
+
 struct ccw_driver zfcp_ccw_driver = {
 	.driver = {
 		.owner	= THIS_MODULE,
@@ -263,7 +321,7 @@ struct ccw_driver zfcp_ccw_driver = {
 	.set_offline = zfcp_ccw_set_offline,
 	.notify      = zfcp_ccw_notify,
 	.shutdown    = zfcp_ccw_shutdown,
-	.freeze      = zfcp_ccw_set_offline,
-	.thaw	     = zfcp_ccw_activate,
-	.restore     = zfcp_ccw_activate,
+	.freeze      = zfcp_ccw_suspend,
+	.thaw	     = zfcp_ccw_thaw,
+	.restore     = zfcp_ccw_resume,
 };
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fbd8b4db602..49b82e46629 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-	shost_for_each_device(sdev, port->adapter->scsi_host) {
+	shost_for_each_device(sdev, adapter->scsi_host) {
 		zfcp_sdev = sdev_to_zfcp(sdev);
 		status = atomic_read(&zfcp_sdev->status);
 		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 3c1d22097ad..e1a8cc2526e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 	length = min((u16)sizeof(struct qdio_buffer),
 		     (u16)ZFCP_DBF_PAY_MAX_REC);
 
-	while ((char *)pl[payload->counter] && payload->counter < scount) {
+	while (payload->counter < scount && (char *)pl[payload->counter]) {
 		memcpy(payload->data, (char *)pl[payload->counter], length);
 		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
 		payload->counter++;
@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 	spin_unlock_irqrestore(&dbf->pay_lock, flags);
 }
 
+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->hba_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->id = ZFCP_DBF_HBA_BASIC;
+
+	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
 static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
 				struct zfcp_adapter *adapter,
 				struct zfcp_port *port,
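
Note: the first zfcp_dbf.c hunk is a bounds fix rather than a style tweak: the old loop condition read pl[payload->counter] before checking payload->counter < scount, so the final iteration could index one element past the array. Since && evaluates left to right and short-circuits, testing the bound first guards the access; the same shape in isolation:

    /* count leading non-NULL entries without indexing past 'limit' */
    static unsigned int count_present(void **pl, unsigned int limit)
    {
            unsigned int n = 0;

            while (n < limit && pl[n])      /* bound first, then access */
                    n++;
            return n;
    }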
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 714f087eb7a..3ac7a4b30dd 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
 	ZFCP_DBF_HBA_RES	= 1,
 	ZFCP_DBF_HBA_USS	= 2,
 	ZFCP_DBF_HBA_BIT	= 3,
+	ZFCP_DBF_HBA_BASIC	= 4,
 };
 
 /**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2955e1a3dea..1305955cbf5 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -77,6 +77,7 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED	0x00000004
 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK		0x00000008
 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT	0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED		0x00000040
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING		0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED	0x00000200
 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED	0x00000400
@@ -204,6 +205,7 @@ struct zfcp_port {
 	struct zfcp_adapter	*adapter;	/* adapter used to access port */
 	struct list_head	unit_list;	/* head of logical unit list */
 	rwlock_t		unit_list_lock;	/* unit list lock */
+	atomic_t		units;		/* zfcp_unit count */
 	atomic_t		status;		/* status of this remote port */
 	u64			wwnn;		/* WWNN if known */
 	u64			wwpn;		/* WWPN */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 92d3df6ac8b..4133ab6e20f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1230,7 +1230,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (result == ZFCP_ERP_SUCCEEDED) {
 			register_service_level(&adapter->service_level);
-			queue_work(adapter->work_queue, &adapter->scan_work);
+			zfcp_fc_conditional_port_scan(adapter);
 			queue_work(adapter->work_queue, &adapter->ns_up_work);
 		} else
 			unregister_service_level(&adapter->service_level);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 36f422770ff..1d3dd3f7d69 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@@ -98,6 +99,8 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
 extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
 extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
 extern void zfcp_fc_sym_name_update(struct work_struct *);
+extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
+extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
 
 /* zfcp_fsf.c */
 extern struct kmem_cache *zfcp_fsf_qtcb_cache;
@@ -158,6 +161,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 extern struct attribute_group zfcp_sysfs_unit_attrs;
 extern struct attribute_group zfcp_sysfs_adapter_attrs;
 extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
 
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 88688a80b2c..ff598cd68b2 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -26,6 +26,27 @@ static u32 zfcp_fc_rscn_range_mask[] = {
 	[ELS_ADDR_FMT_FAB]	= 0x000000,
 };
 
+static bool no_auto_port_rescan;
+module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
+MODULE_PARM_DESC(no_auto_port_rescan,
+		 "no automatic port_rescan (default off)");
+
+void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+	if (no_auto_port_rescan)
+		return;
+
+	queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
+void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
+{
+	if (!no_auto_port_rescan)
+		return;
+
+	queue_work(adapter->work_queue, &adapter->scan_work);
+}
+
 /**
  * zfcp_fc_post_event - post event to userspace via fc_transport
  * @work: work struct with enqueued events
@@ -206,7 +227,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
 				      *(u32 *)page);
 	}
-	queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
+	zfcp_fc_conditional_port_scan(fsf_req->adapter);
 }
 
 static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
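
Note: because the parameter is registered with mode 0600, no_auto_port_rescan can be toggled at runtime through /sys/module/zfcp/parameters/no_auto_port_rescan as well as set at load time (for instance zfcp.no_auto_port_rescan=1 on the kernel command line). When it is set, RSCNs, incoming ELS and successful adapter recovery no longer queue scan_work; zfcp_fc_inverse_conditional_port_scan() then provides the one guaranteed scan when a device is set online.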
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e1c1efc2c5a..c96320d79fb 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		return;
 	}
 
-	zfcp_dbf_hba_fsf_uss("fssrh_2", req);
+	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
 
 	switch (sr_buf->status_type) {
 	case FSF_STATUS_READ_PORT_CLOSED:
@@ -257,7 +257,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
 			zfcp_cfdc_adapter_access_changed(adapter);
 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
-			queue_work(adapter->work_queue, &adapter->scan_work);
+			zfcp_fc_conditional_port_scan(adapter);
 		break;
 	case FSF_STATUS_READ_CFDC_UPDATED:
 		zfcp_cfdc_adapter_access_changed(adapter);
@@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
 	}
 }
 
+#define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
+#define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
+#define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
+#define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
+#define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
+#define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+	u32 fdmi_speed = 0;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+		fdmi_speed |= FC_PORTSPEED_1GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+		fdmi_speed |= FC_PORTSPEED_2GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+		fdmi_speed |= FC_PORTSPEED_4GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+		fdmi_speed |= FC_PORTSPEED_10GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+		fdmi_speed |= FC_PORTSPEED_8GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+		fdmi_speed |= FC_PORTSPEED_16GBIT;
+	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+	return fdmi_speed;
+}
+
 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 {
 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
@@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
 	fc_host_port_name(shost) = nsp->fl_wwpn;
 	fc_host_node_name(shost) = nsp->fl_wwnn;
 	fc_host_port_id(shost) = ntoh24(bottom->s_id);
-	fc_host_speed(shost) = bottom->fc_link_speed;
+	fc_host_speed(shost) =
+		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
 
 	adapter->hydra_version = bottom->adapter_type;
@@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
 	} else
 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
-	fc_host_supported_speeds(shost) = bottom->supported_speed;
+	fc_host_supported_speeds(shost) =
+		zfcp_fsf_convert_portspeed(bottom->supported_speed);
 	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
 	       FC_FC4_LIST_SIZE);
 	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
@@ -771,12 +801,14 @@ out:
 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 {
 	struct scsi_device *sdev = req->data;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_scsi_dev *zfcp_sdev;
 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
 
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
 		if (fsq->word[0] == fsq->word[1]) {
@@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_dbf_san_res("fsscth1", req);
+		zfcp_dbf_san_res("fsscth2", req);
 		ct->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
 	struct scsi_device *sdev = req->data;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_scsi_dev *zfcp_sdev;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
 
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
 	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 			  ZFCP_STATUS_COMMON_ACCESS_BOXED |
 			  ZFCP_STATUS_LUN_SHARED |
@@ -1856,11 +1890,13 @@ out:
 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 {
 	struct scsi_device *sdev = req->data;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_scsi_dev *zfcp_sdev;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		return;
 
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
 	switch (req->qtcb->header.fsf_status) {
 	case FSF_PORT_HANDLE_NOT_VALID:
 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
 	struct fsf_qual_latency_info *lat_in;
 	struct latency_cont *lat = NULL;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
+	struct zfcp_scsi_dev *zfcp_sdev;
 	struct zfcp_blk_drv_data blktrc;
 	int ticks = req->adapter->timer_ticks;
 
@@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+		zfcp_sdev = sdev_to_zfcp(scsi->device);
 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
 		blktrc.channel_lat = lat_in->channel_lat * ticks;
 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 {
 	struct scsi_cmnd *scmnd = req->data;
 	struct scsi_device *sdev = scmnd->device;
-	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_scsi_dev *zfcp_sdev;
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
 		return;
 
+	zfcp_sdev = sdev_to_zfcp(sdev);
+
 	switch (header->fsf_status) {
 	case FSF_HANDLE_MISMATCH:
 	case FSF_PORT_HANDLE_NOT_VALID:
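
Note: zfcp_fsf_convert_portspeed() maps each FSF capability bit onto its fc_transport FC_PORTSPEED_* counterpart explicitly instead of assuming the two encodings stay in lock-step. An equivalent table-driven form of the same translation, shown only as a sketch (the merged code uses the if-chain above):

    static u32 portspeed_map_sketch(u32 fsf_speed)
    {
            static const struct {
                    u32 fsf;
                    u32 fdmi;
            } map[] = {
                    { ZFCP_FSF_PORTSPEED_1GBIT,  FC_PORTSPEED_1GBIT },
                    { ZFCP_FSF_PORTSPEED_2GBIT,  FC_PORTSPEED_2GBIT },
                    { ZFCP_FSF_PORTSPEED_4GBIT,  FC_PORTSPEED_4GBIT },
                    { ZFCP_FSF_PORTSPEED_10GBIT, FC_PORTSPEED_10GBIT },
                    { ZFCP_FSF_PORTSPEED_8GBIT,  FC_PORTSPEED_8GBIT },
                    { ZFCP_FSF_PORTSPEED_16GBIT, FC_PORTSPEED_16GBIT },
                    { ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED,
                      FC_PORTSPEED_NOT_NEGOTIATED },
            };
            u32 out = 0;
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(map); i++)
                    if (fsf_speed & map[i].fsf)
                            out |= map[i].fdmi;
            return out;
    }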
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b9fffc8d94a..50b5615848f 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 {
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
 	struct zfcp_adapter *adapter = qdio->adapter;
-	struct qdio_buffer_element *sbale;
 	int sbal_no, sbal_idx;
-	void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
-	u64 req_id;
-	u8 scount;
 
 	if (unlikely(qdio_err)) {
-		memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
 		if (zfcp_adapter_multi_buffer_active(adapter)) {
+			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+			struct qdio_buffer_element *sbale;
+			u64 req_id;
+			u8 scount;
+
+			memset(pl, 0,
+			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
 			sbale = qdio->res_q[idx]->element;
 			req_id = (u64) sbale->addr;
-			scount = sbale->scount + 1; /* incl. signaling SBAL */
+			scount = min(sbale->scount + 1,
+				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+				     /* incl. signaling SBAL */
 
 			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
 				sbal_idx = (idx + sbal_no) %
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index c66af27b230..1e0eb089dfb 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
 		     zfcp_sysfs_port_rescan_store);
 
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t count)
@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 	else
 		retval = 0;
 
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) > 0) {
+		retval = -EBUSY;
+		mutex_unlock(&zfcp_sysfs_port_units_mutex);
+		goto out;
+	}
+	/* port is about to be removed, so no more unit_add */
+	atomic_set(&port->units, -1);
+	mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
 	write_lock_irq(&adapter->port_list_lock);
 	list_del(&port->list);
 	write_unlock_irq(&adapter->port_list_lock);
@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	u64 fcp_lun;
+	int retval;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		return -EINVAL;
 
-	if (zfcp_unit_add(port, fcp_lun))
-		return -EINVAL;
+	retval = zfcp_unit_add(port, fcp_lun);
+	if (retval)
+		return retval;
 
 	return count;
 }
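
Note: combined with the zfcp_unit.c change below, port->units acts as a mutex-guarded lifetime token: a positive count pins the port (remove_store refuses with -EBUSY), zero makes it removable, and -1 is a tombstone that makes any later unit_add fail with -ENODEV. Since every transition happens under zfcp_sysfs_port_units_mutex, both sides effectively test one predicate (a sketch of the invariant, not a helper the patch adds):

    /* caller must hold zfcp_sysfs_port_units_mutex */
    static bool zfcp_port_accepts_units(struct zfcp_port *port)
    {
            return atomic_read(&port->units) >= 0;
    }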
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 3f2bff0d3aa..1cd2b99ab25 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
 {
 	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
 
-	put_device(&unit->port->dev);
+	atomic_dec(&unit->port->units);
 	kfree(unit);
 }
 
@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
 int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 {
 	struct zfcp_unit *unit;
+	int retval = 0;
+
+	mutex_lock(&zfcp_sysfs_port_units_mutex);
+	if (atomic_read(&port->units) == -1) {
+		/* port is already gone */
+		retval = -ENODEV;
+		goto out;
+	}
 
 	unit = zfcp_unit_find(port, fcp_lun);
 	if (unit) {
 		put_device(&unit->dev);
-		return -EEXIST;
+		retval = -EEXIST;
+		goto out;
 	}
 
 	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
-	if (!unit)
-		return -ENOMEM;
+	if (!unit) {
+		retval = -ENOMEM;
+		goto out;
+	}
 
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
139 if (dev_set_name(&unit->dev, "0x%016llx", 150 if (dev_set_name(&unit->dev, "0x%016llx",
140 (unsigned long long) fcp_lun)) { 151 (unsigned long long) fcp_lun)) {
141 kfree(unit); 152 kfree(unit);
142 return -ENOMEM; 153 retval = -ENOMEM;
154 goto out;
143 } 155 }
144 156
145 get_device(&port->dev);
146
147 if (device_register(&unit->dev)) { 157 if (device_register(&unit->dev)) {
148 put_device(&unit->dev); 158 put_device(&unit->dev);
149 return -ENOMEM; 159 retval = -ENOMEM;
160 goto out;
150 } 161 }
151 162
152 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) { 163 if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
153 device_unregister(&unit->dev); 164 device_unregister(&unit->dev);
154 return -EINVAL; 165 retval = -EINVAL;
166 goto out;
155 } 167 }
156 168
169 atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
170
157 write_lock_irq(&port->unit_list_lock); 171 write_lock_irq(&port->unit_list_lock);
158 list_add_tail(&unit->list, &port->unit_list); 172 list_add_tail(&unit->list, &port->unit_list);
159 write_unlock_irq(&port->unit_list_lock); 173 write_unlock_irq(&port->unit_list_lock);
160 174
161 zfcp_unit_scsi_scan(unit); 175 zfcp_unit_scsi_scan(unit);
162 176
163 return 0; 177out:
178 mutex_unlock(&zfcp_sysfs_port_units_mutex);
179 return retval;
164} 180}
165 181
166/** 182/**
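
The zfcp_unit.c changes above swap the per-unit get_device(&port->dev) reference for the port->units counter, with -1 serving as a "port removal already started" sentinel checked at the top of zfcp_unit_add(). A minimal sketch of the removal-side counterpart this sentinel pairs with; the function name and its exact home in zfcp_sysfs.c are assumptions, only the mutex and the sentinel come from the hunks above:

	/* Hypothetical removal-side counterpart. Only
	 * zfcp_sysfs_port_units_mutex and the -1 sentinel are taken from
	 * the hunks above; the rest is a sketch. */
	static int zfcp_port_block_unit_add(struct zfcp_port *port)
	{
		mutex_lock(&zfcp_sysfs_port_units_mutex);
		if (atomic_read(&port->units) > 0) {
			/* units still configured - refuse to remove port */
			mutex_unlock(&zfcp_sysfs_port_units_mutex);
			return -EBUSY;
		}
		/* from here on zfcp_unit_add() fails with -ENODEV */
		atomic_set(&port->units, -1);
		mutex_unlock(&zfcp_sysfs_port_units_mutex);
		return 0;
	}

Under this scheme the mutex, not a device reference, is what keeps unit creation and port removal from racing, which is why the atomic_inc(&port->units) above is explicitly commented as running under zfcp_sysfs_port_units_mutex.
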
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index d2e9e933f7a..07d2cb126d9 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -48,7 +48,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
48 } 48 }
49 49
50 if (sreset & BE2_SET_RESET) { 50 if (sreset & BE2_SET_RESET) {
51 printk(KERN_ERR "Soft Reset did not deassert\n"); 51 printk(KERN_ERR DRV_NAME
52 " Soft Reset did not deassert\n");
52 return -EIO; 53 return -EIO;
53 } 54 }
54 pconline1 = BE2_MPU_IRAM_ONLINE; 55 pconline1 = BE2_MPU_IRAM_ONLINE;
@@ -67,7 +68,8 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
67 i++; 68 i++;
68 } 69 }
69 if (sreset & BE2_SET_RESET) { 70 if (sreset & BE2_SET_RESET) {
70 printk(KERN_ERR "MPU Online Soft Reset did not deassert\n"); 71 printk(KERN_ERR DRV_NAME
72 " MPU Online Soft Reset did not deassert\n");
71 return -EIO; 73 return -EIO;
72 } 74 }
73 return 0; 75 return 0;
@@ -93,8 +95,9 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
93 } 95 }
94 96
95 if ((status & 0x80000000) || (!num_loop)) { 97 if ((status & 0x80000000) || (!num_loop)) {
96 printk(KERN_ERR "Failed in be_chk_reset_complete" 98 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
97 "status = 0x%x\n", status); 99 "BC_%d : Failed in be_chk_reset_complete"
100 "status = 0x%x\n", status);
98 return -EIO; 101 return -EIO;
99 } 102 }
100 103
@@ -169,6 +172,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
169 struct be_mcc_compl *compl) 172 struct be_mcc_compl *compl)
170{ 173{
171 u16 compl_status, extd_status; 174 u16 compl_status, extd_status;
175 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
172 176
173 be_dws_le_to_cpu(compl, 4); 177 be_dws_le_to_cpu(compl, 4);
174 178
@@ -177,9 +181,12 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
177 if (compl_status != MCC_STATUS_SUCCESS) { 181 if (compl_status != MCC_STATUS_SUCCESS) {
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 182 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK; 183 CQE_STATUS_EXTD_MASK;
180 dev_err(&ctrl->pdev->dev, 184
181 "error in cmd completion: status(compl/extd)=%d/%d\n", 185 beiscsi_log(phba, KERN_ERR,
182 compl_status, extd_status); 186 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
187 "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
188 compl_status, extd_status);
189
183 return -EBUSY; 190 return -EBUSY;
184 } 191 }
185 return 0; 192 return 0;
@@ -233,22 +240,29 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
233{ 240{
234 switch (evt->port_link_status) { 241 switch (evt->port_link_status) {
235 case ASYNC_EVENT_LINK_DOWN: 242 case ASYNC_EVENT_LINK_DOWN:
236 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d\n", 243 beiscsi_log(phba, KERN_ERR,
237 evt->physical_port); 244 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
245 "BC_%d : Link Down on Physical Port %d\n",
246 evt->physical_port);
247
238 phba->state |= BE_ADAPTER_LINK_DOWN; 248 phba->state |= BE_ADAPTER_LINK_DOWN;
239 iscsi_host_for_each_session(phba->shost, 249 iscsi_host_for_each_session(phba->shost,
240 be2iscsi_fail_session); 250 be2iscsi_fail_session);
241 break; 251 break;
242 case ASYNC_EVENT_LINK_UP: 252 case ASYNC_EVENT_LINK_UP:
243 phba->state = BE_ADAPTER_UP; 253 phba->state = BE_ADAPTER_UP;
244 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d\n", 254 beiscsi_log(phba, KERN_ERR,
245 evt->physical_port); 255 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
256 "BC_%d : Link UP on Physical Port %d\n",
257 evt->physical_port);
246 break; 258 break;
247 default: 259 default:
248 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on" 260 beiscsi_log(phba, KERN_ERR,
249 "Physical Port %d\n", 261 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
250 evt->port_link_status, 262 "BC_%d : Unexpected Async Notification %d on"
251 evt->physical_port); 263 " Physical Port %d\n",
264 evt->port_link_status,
265 evt->physical_port);
252 } 266 }
253} 267}
254 268
@@ -279,9 +293,11 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
279 beiscsi_async_link_state_process(phba, 293 beiscsi_async_link_state_process(phba,
280 (struct be_async_event_link_state *) compl); 294 (struct be_async_event_link_state *) compl);
281 else 295 else
282 SE_DEBUG(DBG_LVL_1, 296 beiscsi_log(phba, KERN_ERR,
283 " Unsupported Async Event, flags" 297 BEISCSI_LOG_CONFIG |
284 " = 0x%08x\n", compl->flags); 298 BEISCSI_LOG_MBOX,
299 "BC_%d : Unsupported Async Event, flags"
300 " = 0x%08x\n", compl->flags);
285 301
286 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 302 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
287 status = be_mcc_compl_process(ctrl, compl); 303 status = be_mcc_compl_process(ctrl, compl);
@@ -312,7 +328,10 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
312 udelay(100); 328 udelay(100);
313 } 329 }
314 if (i == mcc_timeout) { 330 if (i == mcc_timeout) {
315 dev_err(&phba->pcidev->dev, "mccq poll timed out\n"); 331 beiscsi_log(phba, KERN_ERR,
332 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
333 "BC_%d : mccq poll timed out\n");
334
316 return -EBUSY; 335 return -EBUSY;
317 } 336 }
318 return 0; 337 return 0;
@@ -338,7 +357,11 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
338 break; 357 break;
339 358
340 if (cnt > 12000000) { 359 if (cnt > 12000000) {
341 dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n"); 360 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
361 beiscsi_log(phba, KERN_ERR,
362 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
363 "BC_%d : mbox_db poll timed out\n");
364
342 return -EBUSY; 365 return -EBUSY;
343 } 366 }
344 367
@@ -360,6 +383,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
360 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem; 383 struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
361 struct be_mcc_mailbox *mbox = mbox_mem->va; 384 struct be_mcc_mailbox *mbox = mbox_mem->va;
362 struct be_mcc_compl *compl = &mbox->compl; 385 struct be_mcc_compl *compl = &mbox->compl;
386 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
363 387
364 val &= ~MPU_MAILBOX_DB_RDY_MASK; 388 val &= ~MPU_MAILBOX_DB_RDY_MASK;
365 val |= MPU_MAILBOX_DB_HI_MASK; 389 val |= MPU_MAILBOX_DB_HI_MASK;
@@ -368,7 +392,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
368 392
369 status = be_mbox_db_ready_wait(ctrl); 393 status = be_mbox_db_ready_wait(ctrl);
370 if (status != 0) { 394 if (status != 0) {
371 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n"); 395 beiscsi_log(phba, KERN_ERR,
396 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
397 "BC_%d : be_mbox_db_ready_wait failed\n");
398
372 return status; 399 return status;
373 } 400 }
374 val = 0; 401 val = 0;
@@ -379,18 +406,27 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
379 406
380 status = be_mbox_db_ready_wait(ctrl); 407 status = be_mbox_db_ready_wait(ctrl);
381 if (status != 0) { 408 if (status != 0) {
382 SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed\n"); 409 beiscsi_log(phba, KERN_ERR,
410 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
411 "BC_%d : be_mbox_db_ready_wait failed\n");
412
383 return status; 413 return status;
384 } 414 }
385 if (be_mcc_compl_is_new(compl)) { 415 if (be_mcc_compl_is_new(compl)) {
386 status = be_mcc_compl_process(ctrl, &mbox->compl); 416 status = be_mcc_compl_process(ctrl, &mbox->compl);
387 be_mcc_compl_use(compl); 417 be_mcc_compl_use(compl);
388 if (status) { 418 if (status) {
389 SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process\n"); 419 beiscsi_log(phba, KERN_ERR,
420 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
421 "BC_%d : After be_mcc_compl_process\n");
422
390 return status; 423 return status;
391 } 424 }
392 } else { 425 } else {
393 dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n"); 426 beiscsi_log(phba, KERN_ERR,
427 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
428 "BC_%d : Invalid Mailbox Completion\n");
429
394 return -EBUSY; 430 return -EBUSY;
395 } 431 }
396 return 0; 432 return 0;
@@ -436,7 +472,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
436 if (status) 472 if (status)
437 return status; 473 return status;
438 } else { 474 } else {
439 dev_err(&phba->pcidev->dev, "invalid mailbox completion\n"); 475 beiscsi_log(phba, KERN_ERR,
476 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
477 "BC_%d : invalid mailbox completion\n");
478
440 return -EBUSY; 479 return -EBUSY;
441 } 480 }
442 return 0; 481 return 0;
@@ -528,7 +567,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
528 struct be_dma_mem *q_mem = &eq->dma_mem; 567 struct be_dma_mem *q_mem = &eq->dma_mem;
529 int status; 568 int status;
530 569
531 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
532 spin_lock(&ctrl->mbox_lock); 570 spin_lock(&ctrl->mbox_lock);
533 memset(wrb, 0, sizeof(*wrb)); 571 memset(wrb, 0, sizeof(*wrb));
534 572
@@ -563,10 +601,10 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
563int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) 601int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
564{ 602{
565 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 603 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
604 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
566 int status; 605 int status;
567 u8 *endian_check; 606 u8 *endian_check;
568 607
569 SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
570 spin_lock(&ctrl->mbox_lock); 608 spin_lock(&ctrl->mbox_lock);
571 memset(wrb, 0, sizeof(*wrb)); 609 memset(wrb, 0, sizeof(*wrb));
572 610
@@ -583,7 +621,8 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
583 621
584 status = be_mbox_notify(ctrl); 622 status = be_mbox_notify(ctrl);
585 if (status) 623 if (status)
586 SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed\n"); 624 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
625 "BC_%d : be_cmd_fw_initialize Failed\n");
587 626
588 spin_unlock(&ctrl->mbox_lock); 627 spin_unlock(&ctrl->mbox_lock);
589 return status; 628 return status;
@@ -596,11 +635,11 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
596 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 635 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
597 struct be_cmd_req_cq_create *req = embedded_payload(wrb); 636 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
598 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); 637 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
638 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
599 struct be_dma_mem *q_mem = &cq->dma_mem; 639 struct be_dma_mem *q_mem = &cq->dma_mem;
600 void *ctxt = &req->context; 640 void *ctxt = &req->context;
601 int status; 641 int status;
602 642
603 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create\n");
604 spin_lock(&ctrl->mbox_lock); 643 spin_lock(&ctrl->mbox_lock);
605 memset(wrb, 0, sizeof(*wrb)); 644 memset(wrb, 0, sizeof(*wrb));
606 645
@@ -608,8 +647,6 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
608 647
609 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 648 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
610 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 649 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
611 if (!q_mem->va)
612 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
613 650
614 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 651 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
615 652
@@ -633,8 +670,10 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
633 cq->id = le16_to_cpu(resp->cq_id); 670 cq->id = le16_to_cpu(resp->cq_id);
634 cq->created = true; 671 cq->created = true;
635 } else 672 } else
636 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x\n", 673 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
637 status); 674 "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
675 status);
676
638 spin_unlock(&ctrl->mbox_lock); 677 spin_unlock(&ctrl->mbox_lock);
639 678
640 return status; 679 return status;
@@ -700,10 +739,14 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
700{ 739{
701 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 740 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
702 struct be_cmd_req_q_destroy *req = embedded_payload(wrb); 741 struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
742 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
703 u8 subsys = 0, opcode = 0; 743 u8 subsys = 0, opcode = 0;
704 int status; 744 int status;
705 745
706 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy\n"); 746 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
747 "BC_%d : In beiscsi_cmd_q_destroy "
748 "queue_type : %d\n", queue_type);
749
707 spin_lock(&ctrl->mbox_lock); 750 spin_lock(&ctrl->mbox_lock);
708 memset(wrb, 0, sizeof(*wrb)); 751 memset(wrb, 0, sizeof(*wrb));
709 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 752 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -759,7 +802,6 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
759 void *ctxt = &req->context; 802 void *ctxt = &req->context;
760 int status; 803 int status;
761 804
762 SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
763 spin_lock(&ctrl->mbox_lock); 805 spin_lock(&ctrl->mbox_lock);
764 memset(wrb, 0, sizeof(*wrb)); 806 memset(wrb, 0, sizeof(*wrb));
765 807
@@ -830,6 +872,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
830{ 872{
831 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 873 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
832 struct be_post_sgl_pages_req *req = embedded_payload(wrb); 874 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
875 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
833 int status; 876 int status;
834 unsigned int curr_pages; 877 unsigned int curr_pages;
835 u32 internal_page_offset = 0; 878 u32 internal_page_offset = 0;
@@ -860,8 +903,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
860 903
861 status = be_mbox_notify(ctrl); 904 status = be_mbox_notify(ctrl);
862 if (status) { 905 if (status) {
863 SE_DEBUG(DBG_LVL_1, 906 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
864 "FW CMD to map iscsi frags failed.\n"); 907 "BC_%d : FW CMD to map iscsi frags failed.\n");
908
865 goto error; 909 goto error;
866 } 910 }
867 } while (num_pages > 0); 911 } while (num_pages > 0);
@@ -890,3 +934,45 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
890 spin_unlock(&ctrl->mbox_lock); 934 spin_unlock(&ctrl->mbox_lock);
891 return status; 935 return status;
892} 936}
937
938/**
939 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
940 * @phba: device priv structure instance
941 * @vlan_tag: TAG to be set
942 *
943 * Set the VLAN_TAG for the adapter or disable VLAN on the adapter
944 *
945 * returns
946 * TAG for the MBX Cmd
947 */
948int be_cmd_set_vlan(struct beiscsi_hba *phba,
949 uint16_t vlan_tag)
950{
951 unsigned int tag = 0;
952 struct be_mcc_wrb *wrb;
953 struct be_cmd_set_vlan_req *req;
954 struct be_ctrl_info *ctrl = &phba->ctrl;
955
956 spin_lock(&ctrl->mbox_lock);
957 tag = alloc_mcc_tag(phba);
958 if (!tag) {
959 spin_unlock(&ctrl->mbox_lock);
960 return tag;
961 }
962
963 wrb = wrb_from_mccq(phba);
964 req = embedded_payload(wrb);
965 wrb->tag0 |= tag;
966 be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
967 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
968 OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
969 sizeof(*req));
970
971 req->interface_hndl = phba->interface_handle;
972 req->vlan_priority = vlan_tag;
973
974 be_mcc_notify(phba);
975 spin_unlock(&ctrl->mbox_lock);
976
977 return tag;
978}
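
be_cmd_set_vlan() only posts the request: it allocates an MCC tag under mbox_lock, fills the embedded WRB, rings the MCC doorbell through be_mcc_notify(), and hands the tag back (0 meaning no free tag). Completion is decoded by the caller, following the same tag-wait pattern as the beiscsi_get_initname() hunk further down in this patch. A sketch of that expected caller; the mgmt_set_vlan() wrapper referenced from be_iscsi.c below is not part of this file, so its body here is an assumption:

	/* Assumed shape of the mgmt_set_vlan() wrapper; the tag-wait and
	 * status decoding mirror beiscsi_get_initname() below. */
	int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
	{
		unsigned int tag;
		int status, extd_status;

		tag = be_cmd_set_vlan(phba, vlan_tag);
		if (!tag)		/* no free MCC tag */
			return -EBUSY;

		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);

		extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
		status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
		free_mcc_tag(&phba->ctrl, tag);

		return (status || extd_status) ? -EIO : 0;
	}
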
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index b0b36c6a145..2c8f98df128 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -348,6 +348,23 @@ struct be_cmd_get_boot_target_resp {
348 int boot_session_handle; 348 int boot_session_handle;
349}; 349};
350 350
351struct be_cmd_reopen_session_req {
352 struct be_cmd_req_hdr hdr;
353#define BE_REOPEN_ALL_SESSIONS 0x00
354#define BE_REOPEN_BOOT_SESSIONS 0x01
355#define BE_REOPEN_A_SESSION 0x02
356 u16 reopen_type;
357 u16 rsvd;
358 u32 session_handle;
359} __packed;
360
361struct be_cmd_reopen_session_resp {
362 struct be_cmd_resp_hdr hdr;
363 u32 rsvd;
364 u32 session_handle;
365} __packed;
366
367
351struct be_cmd_mac_query_req { 368struct be_cmd_mac_query_req {
352 struct be_cmd_req_hdr hdr; 369 struct be_cmd_req_hdr hdr;
353 u8 type; 370 u8 type;
@@ -432,6 +449,12 @@ struct be_cmd_get_def_gateway_resp {
432 struct ip_addr_format ip_addr; 449 struct ip_addr_format ip_addr;
433} __packed; 450} __packed;
434 451
452#define BEISCSI_VLAN_DISABLE 0xFFFF
453struct be_cmd_set_vlan_req {
454 struct be_cmd_req_hdr hdr;
455 u32 interface_hndl;
456 u32 vlan_priority;
457} __packed;
435/******************** Create CQ ***************************/ 458/******************** Create CQ ***************************/
436/** 459/**
437 * Pseudo amap definition in which each bit of the actual structure is defined 460 * Pseudo amap definition in which each bit of the actual structure is defined
@@ -671,6 +694,9 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
671 694
672bool is_link_state_evt(u32 trailer); 695bool is_link_state_evt(u32 trailer);
673 696
697/* Configuration Functions */
698int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
699
674struct be_default_pdu_context { 700struct be_default_pdu_context {
675 u32 dw[4]; 701 u32 dw[4];
676} __packed; 702} __packed;
@@ -911,6 +937,7 @@ struct be_cmd_get_all_if_id_req {
911#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6 937#define OPCODE_ISCSI_INI_CFG_GET_HBA_NAME 6
912#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7 938#define OPCODE_ISCSI_INI_CFG_SET_HBA_NAME 7
913#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14 939#define OPCODE_ISCSI_INI_SESSION_GET_A_SESSION 14
940#define OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS 36
914#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41 941#define OPCODE_ISCSI_INI_DRIVER_OFFLOAD_SESSION 41
915#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42 942#define OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION 42
916#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52 943#define OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET 52
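
The new reopen-session request pairs a reopen_type selector with an optional session_handle, matched by the OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS define above. A sketch of filling that payload; the wrb/tag plumbing would follow be_cmd_set_vlan() in be_cmds.c, and both CMD_SUBSYSTEM_ISCSI_INI and the handle being ignored for the ALL/BOOT variants are assumptions, not taken from this hunk:

	/* Hypothetical payload setup for the reopen request defined above. */
	static void beiscsi_fill_reopen_req(struct be_cmd_reopen_session_req *req,
					    u16 reopen_type, u32 sess_handle)
	{
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
				   OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
				   sizeof(*req));
		req->reopen_type = reopen_type;		/* BE_REOPEN_* above */
		req->session_handle = sess_handle;	/* assumed relevant only
							   for BE_REOPEN_A_SESSION */
	}
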
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 43f35034585..aedb0d9a9da 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -50,21 +50,27 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
50 struct beiscsi_session *beiscsi_sess; 50 struct beiscsi_session *beiscsi_sess;
51 struct beiscsi_io_task *io_task; 51 struct beiscsi_io_task *io_task;
52 52
53 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");
54 53
55 if (!ep) { 54 if (!ep) {
56 SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n"); 55 printk(KERN_ERR
56 "beiscsi_session_create: invalid ep\n");
57 return NULL; 57 return NULL;
58 } 58 }
59 beiscsi_ep = ep->dd_data; 59 beiscsi_ep = ep->dd_data;
60 phba = beiscsi_ep->phba; 60 phba = beiscsi_ep->phba;
61 shost = phba->shost; 61 shost = phba->shost;
62
63 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
64 "BS_%d : In beiscsi_session_create\n");
65
62 if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) { 66 if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
63 shost_printk(KERN_ERR, shost, "Cannot handle %d cmds." 67 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
64 "Max cmds per session supported is %d. Using %d. " 68 "BS_%d : Cannot handle %d cmds."
65 "\n", cmds_max, 69 "Max cmds per session supported is %d. Using %d."
66 beiscsi_ep->phba->params.wrbs_per_cxn, 70 "\n", cmds_max,
67 beiscsi_ep->phba->params.wrbs_per_cxn); 71 beiscsi_ep->phba->params.wrbs_per_cxn,
72 beiscsi_ep->phba->params.wrbs_per_cxn);
73
68 cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn; 74 cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
69 } 75 }
70 76
@@ -102,7 +108,7 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
102 struct iscsi_session *sess = cls_session->dd_data; 108 struct iscsi_session *sess = cls_session->dd_data;
103 struct beiscsi_session *beiscsi_sess = sess->dd_data; 109 struct beiscsi_session *beiscsi_sess = sess->dd_data;
104 110
105 SE_DEBUG(DBG_LVL_8, "In beiscsi_session_destroy\n"); 111 printk(KERN_INFO "In beiscsi_session_destroy\n");
106 pci_pool_destroy(beiscsi_sess->bhs_pool); 112 pci_pool_destroy(beiscsi_sess->bhs_pool);
107 iscsi_session_teardown(cls_session); 113 iscsi_session_teardown(cls_session);
108} 114}
@@ -123,11 +129,13 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
123 struct iscsi_session *sess; 129 struct iscsi_session *sess;
124 struct beiscsi_session *beiscsi_sess; 130 struct beiscsi_session *beiscsi_sess;
125 131
126 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_create ,cid"
127 "from iscsi layer=%d\n", cid);
128 shost = iscsi_session_to_shost(cls_session); 132 shost = iscsi_session_to_shost(cls_session);
129 phba = iscsi_host_priv(shost); 133 phba = iscsi_host_priv(shost);
130 134
135 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
136 "BS_%d : In beiscsi_conn_create ,cid"
137 "from iscsi layer=%d\n", cid);
138
131 cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid); 139 cls_conn = iscsi_conn_setup(cls_session, sizeof(*beiscsi_conn), cid);
132 if (!cls_conn) 140 if (!cls_conn)
133 return NULL; 141 return NULL;
@@ -154,12 +162,15 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
154 unsigned int cid) 162 unsigned int cid)
155{ 163{
156 if (phba->conn_table[cid]) { 164 if (phba->conn_table[cid]) {
157 SE_DEBUG(DBG_LVL_1, 165 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
158 "Connection table already occupied. Detected clash\n"); 166 "BS_%d : Connection table already occupied. Detected clash\n");
167
159 return -EINVAL; 168 return -EINVAL;
160 } else { 169 } else {
161 SE_DEBUG(DBG_LVL_8, "phba->conn_table[%d]=%p(beiscsi_conn)\n", 170 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
162 cid, beiscsi_conn); 171 "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
172 cid, beiscsi_conn);
173
163 phba->conn_table[cid] = beiscsi_conn; 174 phba->conn_table[cid] = beiscsi_conn;
164 } 175 }
165 return 0; 176 return 0;
@@ -184,7 +195,6 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
184 struct beiscsi_endpoint *beiscsi_ep; 195 struct beiscsi_endpoint *beiscsi_ep;
185 struct iscsi_endpoint *ep; 196 struct iscsi_endpoint *ep;
186 197
187 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_bind\n");
188 ep = iscsi_lookup_endpoint(transport_fd); 198 ep = iscsi_lookup_endpoint(transport_fd);
189 if (!ep) 199 if (!ep)
190 return -EINVAL; 200 return -EINVAL;
@@ -195,17 +205,21 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
195 return -EINVAL; 205 return -EINVAL;
196 206
197 if (beiscsi_ep->phba != phba) { 207 if (beiscsi_ep->phba != phba) {
198 SE_DEBUG(DBG_LVL_8, 208 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
199 "beiscsi_ep->hba=%p not equal to phba=%p\n", 209 "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
200 beiscsi_ep->phba, phba); 210 beiscsi_ep->phba, phba);
211
201 return -EEXIST; 212 return -EEXIST;
202 } 213 }
203 214
204 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid; 215 beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
205 beiscsi_conn->ep = beiscsi_ep; 216 beiscsi_conn->ep = beiscsi_ep;
206 beiscsi_ep->conn = beiscsi_conn; 217 beiscsi_ep->conn = beiscsi_conn;
207 SE_DEBUG(DBG_LVL_8, "beiscsi_conn=%p conn=%p ep_cid=%d\n", 218
208 beiscsi_conn, conn, beiscsi_ep->ep_cid); 219 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
220 "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
221 beiscsi_conn, conn, beiscsi_ep->ep_cid);
222
209 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid); 223 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
210} 224}
211 225
@@ -219,8 +233,9 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
219 ISCSI_IFACE_TYPE_IPV4, 233 ISCSI_IFACE_TYPE_IPV4,
220 0, 0); 234 0, 0);
221 if (!phba->ipv4_iface) { 235 if (!phba->ipv4_iface) {
222 shost_printk(KERN_ERR, phba->shost, "Could not " 236 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
223 "create default IPv4 address.\n"); 237 "BS_%d : Could not "
238 "create default IPv4 address.\n");
224 return -ENODEV; 239 return -ENODEV;
225 } 240 }
226 241
@@ -237,8 +252,9 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
237 ISCSI_IFACE_TYPE_IPV6, 252 ISCSI_IFACE_TYPE_IPV6,
238 0, 0); 253 0, 0);
239 if (!phba->ipv6_iface) { 254 if (!phba->ipv6_iface) {
240 shost_printk(KERN_ERR, phba->shost, "Could not " 255 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
241 "create default IPv6 address.\n"); 256 "BS_%d : Could not "
257 "create default IPv6 address.\n");
242 return -ENODEV; 258 return -ENODEV;
243 } 259 }
244 260
@@ -299,12 +315,14 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
299 iface_ip = nla_data(nla); 315 iface_ip = nla_data(nla);
300 break; 316 break;
301 default: 317 default:
302 shost_printk(KERN_ERR, shost, "Unsupported param %d\n", 318 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
303 iface_param->param); 319 "BS_%d : Unsupported param %d\n",
320 iface_param->param);
304 } 321 }
305 322
306 if (!iface_ip || !iface_subnet) { 323 if (!iface_ip || !iface_subnet) {
307 shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n"); 324 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
325 "BS_%d : IP and Subnet Mask required\n");
308 return -EINVAL; 326 return -EINVAL;
309 } 327 }
310 328
@@ -314,6 +332,51 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
314 return ret; 332 return ret;
315} 333}
316 334
335/**
336 * beiscsi_set_vlan_tag()- Set the VLAN TAG
337 * @shost: Scsi Host for the driver instance
338 * @iface_param: Interface parameters
339 *
340 * Set the VLAN TAG for the adapter or disable
341 * the VLAN config
342 *
343 * returns
344 * Success: 0
345 * Failure: Non-Zero Value
346 */
347static int
348beiscsi_set_vlan_tag(struct Scsi_Host *shost,
349 struct iscsi_iface_param_info *iface_param)
350{
351 struct beiscsi_hba *phba = iscsi_host_priv(shost);
352 int ret = 0;
353
354 /* Get the Interface Handle */
355 if (mgmt_get_all_if_id(phba)) {
356 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
357 "BS_%d : Getting Interface Handle Failed\n");
358 return -EIO;
359 }
360
361 switch (iface_param->param) {
362 case ISCSI_NET_PARAM_VLAN_ENABLED:
363 if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
364 ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
365 break;
366 case ISCSI_NET_PARAM_VLAN_TAG:
367 ret = mgmt_set_vlan(phba,
368 *((uint16_t *)iface_param->value));
369 break;
370 default:
371 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
372 "BS_%d : Unkown Param Type : %d\n",
373 iface_param->param);
374 return -ENOSYS;
375 }
376 return ret;
377}
378
379
317static int 380static int
318beiscsi_set_ipv4(struct Scsi_Host *shost, 381beiscsi_set_ipv4(struct Scsi_Host *shost,
319 struct iscsi_iface_param_info *iface_param, 382 struct iscsi_iface_param_info *iface_param,
@@ -335,8 +398,9 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
335 ret = beiscsi_set_static_ip(shost, iface_param, 398 ret = beiscsi_set_static_ip(shost, iface_param,
336 data, dt_len); 399 data, dt_len);
337 else 400 else
338 shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n", 401 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
339 iface_param->value[0]); 402 "BS_%d : Invalid BOOTPROTO: %d\n",
403 iface_param->value[0]);
340 break; 404 break;
341 case ISCSI_NET_PARAM_IFACE_ENABLE: 405 case ISCSI_NET_PARAM_IFACE_ENABLE:
342 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) 406 if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
@@ -349,9 +413,14 @@ beiscsi_set_ipv4(struct Scsi_Host *shost,
349 ret = beiscsi_set_static_ip(shost, iface_param, 413 ret = beiscsi_set_static_ip(shost, iface_param,
350 data, dt_len); 414 data, dt_len);
351 break; 415 break;
416 case ISCSI_NET_PARAM_VLAN_ENABLED:
417 case ISCSI_NET_PARAM_VLAN_TAG:
418 ret = beiscsi_set_vlan_tag(shost, iface_param);
419 break;
352 default: 420 default:
353 shost_printk(KERN_ERR, shost, "Param %d not supported\n", 421 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
354 iface_param->param); 422 "BS_%d : Param %d not supported\n",
423 iface_param->param);
355 } 424 }
356 425
357 return ret; 426 return ret;
@@ -379,8 +448,9 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
379 ISCSI_BOOTPROTO_STATIC); 448 ISCSI_BOOTPROTO_STATIC);
380 break; 449 break;
381 default: 450 default:
382 shost_printk(KERN_ERR, shost, "Param %d not supported\n", 451 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
383 iface_param->param); 452 "BS_%d : Param %d not supported\n",
453 iface_param->param);
384 } 454 }
385 455
386 return ret; 456 return ret;
@@ -390,6 +460,7 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
390 void *data, uint32_t dt_len) 460 void *data, uint32_t dt_len)
391{ 461{
392 struct iscsi_iface_param_info *iface_param = NULL; 462 struct iscsi_iface_param_info *iface_param = NULL;
463 struct beiscsi_hba *phba = iscsi_host_priv(shost);
393 struct nlattr *attrib; 464 struct nlattr *attrib;
394 uint32_t rm_len = dt_len; 465 uint32_t rm_len = dt_len;
395 int ret = 0 ; 466 int ret = 0 ;
@@ -404,9 +475,11 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
404 * BE2ISCSI only supports 1 interface 475 * BE2ISCSI only supports 1 interface
405 */ 476 */
406 if (iface_param->iface_num) { 477 if (iface_param->iface_num) {
407 shost_printk(KERN_ERR, shost, "Invalid iface_num %d." 478 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
408 "Only iface_num 0 is supported.\n", 479 "BS_%d : Invalid iface_num %d."
409 iface_param->iface_num); 480 "Only iface_num 0 is supported.\n",
481 iface_param->iface_num);
482
410 return -EINVAL; 483 return -EINVAL;
411 } 484 }
412 485
@@ -420,9 +493,9 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
420 data, dt_len); 493 data, dt_len);
421 break; 494 break;
422 default: 495 default:
423 shost_printk(KERN_ERR, shost, 496 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
424 "Invalid iface type :%d passed\n", 497 "BS_%d : Invalid iface type :%d passed\n",
425 iface_param->iface_type); 498 iface_param->iface_type);
426 break; 499 break;
427 } 500 }
428 501
@@ -465,6 +538,27 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
465 case ISCSI_NET_PARAM_IPV4_SUBNET: 538 case ISCSI_NET_PARAM_IPV4_SUBNET:
466 len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask); 539 len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
467 break; 540 break;
541 case ISCSI_NET_PARAM_VLAN_ENABLED:
542 len = sprintf(buf, "%s\n",
543 (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
544 ? "Disabled" : "Enabled");
545 break;
546 case ISCSI_NET_PARAM_VLAN_ID:
547 if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
548 return -EINVAL;
549 else
550 len = sprintf(buf, "%d\n",
551 (if_info.vlan_priority &
552 ISCSI_MAX_VLAN_ID));
553 break;
554 case ISCSI_NET_PARAM_VLAN_PRIORITY:
555 if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
556 return -EINVAL;
557 else
558 len = sprintf(buf, "%d\n",
559 ((if_info.vlan_priority >> 13) &
560 ISCSI_MAX_VLAN_PRIORITY));
561 break;
468 default: 562 default:
469 WARN_ON(1); 563 WARN_ON(1);
470 } 564 }
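
The readback above documents how VLAN state travels as one 16-bit word: the ID sits in the low bits (masked with ISCSI_MAX_VLAN_ID), the 802.1p priority in bits 13-15, and BEISCSI_VLAN_DISABLE (0xFFFF) means VLAN is off. A standalone worked example of that encoding; the bit layout is inferred from the masks in this hunk rather than taken from a driver header:

	/* Worked example of the 16-bit VLAN word used by this patch. */
	#include <stdint.h>
	#include <stdio.h>

	#define BEISCSI_VLAN_DISABLE	0xFFFF
	#define VLAN_ID_MASK		0x0FFF	/* ISCSI_MAX_VLAN_ID */
	#define VLAN_PRIO_SHIFT		13	/* priority in bits 13..15 */

	int main(void)
	{
		uint16_t word = (5u << VLAN_PRIO_SHIFT) | 100u; /* prio 5, VLAN 100 */

		if (word == BEISCSI_VLAN_DISABLE) {
			printf("VLAN disabled\n");
			return 0;
		}
		printf("vlan id=%u priority=%u\n",
		       word & VLAN_ID_MASK,
		       (word >> VLAN_PRIO_SHIFT) & 0x7);
		return 0;	/* prints: vlan id=100 priority=5 */
	}
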
@@ -486,6 +580,9 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
486 case ISCSI_NET_PARAM_IPV4_SUBNET: 580 case ISCSI_NET_PARAM_IPV4_SUBNET:
487 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 581 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
488 case ISCSI_NET_PARAM_IPV6_ADDR: 582 case ISCSI_NET_PARAM_IPV6_ADDR:
583 case ISCSI_NET_PARAM_VLAN_ENABLED:
584 case ISCSI_NET_PARAM_VLAN_ID:
585 case ISCSI_NET_PARAM_VLAN_PRIORITY:
489 len = be2iscsi_get_if_param(phba, iface, param, buf); 586 len = be2iscsi_get_if_param(phba, iface, param, buf);
490 break; 587 break;
491 case ISCSI_NET_PARAM_IFACE_ENABLE: 588 case ISCSI_NET_PARAM_IFACE_ENABLE:
@@ -518,7 +615,10 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
518 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 615 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
519 int len = 0; 616 int len = 0;
520 617
521 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_get_param, param= %d\n", param); 618 beiscsi_log(beiscsi_ep->phba, KERN_INFO,
619 BEISCSI_LOG_CONFIG,
620 "BS_%d : In beiscsi_ep_get_param,"
621 " param= %d\n", param);
522 622
523 switch (param) { 623 switch (param) {
524 case ISCSI_PARAM_CONN_PORT: 624 case ISCSI_PARAM_CONN_PORT:
@@ -541,9 +641,14 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
541{ 641{
542 struct iscsi_conn *conn = cls_conn->dd_data; 642 struct iscsi_conn *conn = cls_conn->dd_data;
543 struct iscsi_session *session = conn->session; 643 struct iscsi_session *session = conn->session;
644 struct beiscsi_hba *phba = NULL;
544 int ret; 645 int ret;
545 646
546 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_set_param, param= %d\n", param); 647 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
648 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
649 "BS_%d : In beiscsi_conn_set_param,"
650 " param= %d\n", param);
651
547 ret = iscsi_set_param(cls_conn, param, buf, buflen); 652 ret = iscsi_set_param(cls_conn, param, buf, buflen);
548 if (ret) 653 if (ret)
549 return ret; 654 return ret;
@@ -593,7 +698,9 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
593 698
594 tag = be_cmd_get_initname(phba); 699 tag = be_cmd_get_initname(phba);
595 if (!tag) { 700 if (!tag) {
596 SE_DEBUG(DBG_LVL_1, "Getting Initiator Name Failed\n"); 701 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
702 "BS_%d : Getting Initiator Name Failed\n");
703
597 return -EBUSY; 704 return -EBUSY;
598 } else 705 } else
599 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 706 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -604,9 +711,12 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
604 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 711 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
605 712
606 if (status || extd_status) { 713 if (status || extd_status) {
607 SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with " 714 beiscsi_log(phba, KERN_ERR,
608 "status = %d extd_status = %d\n", 715 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
609 status, extd_status); 716 "BS_%d : MailBox Command Failed with "
717 "status = %d extd_status = %d\n",
718 status, extd_status);
719
610 free_mcc_tag(&phba->ctrl, tag); 720 free_mcc_tag(&phba->ctrl, tag);
611 return -EAGAIN; 721 return -EAGAIN;
612 } 722 }
@@ -650,7 +760,9 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
650 760
651 tag = be_cmd_get_port_speed(phba); 761 tag = be_cmd_get_port_speed(phba);
652 if (!tag) { 762 if (!tag) {
653 SE_DEBUG(DBG_LVL_1, "Getting Port Speed Failed\n"); 763 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
764 "BS_%d : Getting Port Speed Failed\n");
765
654 return -EBUSY; 766 return -EBUSY;
655 } else 767 } else
656 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 768 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -661,9 +773,12 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
661 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 773 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
662 774
663 if (status || extd_status) { 775 if (status || extd_status) {
664 SE_DEBUG(DBG_LVL_1, "MailBox Command Failed with " 776 beiscsi_log(phba, KERN_ERR,
665 "status = %d extd_status = %d\n", 777 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
666 status, extd_status); 778 "BS_%d : MailBox Command Failed with "
779 "status = %d extd_status = %d\n",
780 status, extd_status);
781
667 free_mcc_tag(&phba->ctrl, tag); 782 free_mcc_tag(&phba->ctrl, tag);
668 return -EAGAIN; 783 return -EAGAIN;
669 } 784 }
@@ -704,20 +819,24 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
704 struct beiscsi_hba *phba = iscsi_host_priv(shost); 819 struct beiscsi_hba *phba = iscsi_host_priv(shost);
705 int status = 0; 820 int status = 0;
706 821
707 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); 822 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
823 "BS_%d : In beiscsi_get_host_param,"
824 " param= %d\n", param);
825
708 switch (param) { 826 switch (param) {
709 case ISCSI_HOST_PARAM_HWADDRESS: 827 case ISCSI_HOST_PARAM_HWADDRESS:
710 status = beiscsi_get_macaddr(buf, phba); 828 status = beiscsi_get_macaddr(buf, phba);
711 if (status < 0) { 829 if (status < 0) {
712 SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n"); 830 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
831 "BS_%d : beiscsi_get_macaddr Failed\n");
713 return status; 832 return status;
714 } 833 }
715 break; 834 break;
716 case ISCSI_HOST_PARAM_INITIATOR_NAME: 835 case ISCSI_HOST_PARAM_INITIATOR_NAME:
717 status = beiscsi_get_initname(buf, phba); 836 status = beiscsi_get_initname(buf, phba);
718 if (status < 0) { 837 if (status < 0) {
719 SE_DEBUG(DBG_LVL_1, 838 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
720 "Retreiving Initiator Name Failed\n"); 839 "BS_%d : Retreiving Initiator Name Failed\n");
721 return status; 840 return status;
722 } 841 }
723 break; 842 break;
@@ -728,8 +847,8 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
728 case ISCSI_HOST_PARAM_PORT_SPEED: 847 case ISCSI_HOST_PARAM_PORT_SPEED:
729 status = beiscsi_get_port_speed(shost); 848 status = beiscsi_get_port_speed(shost);
730 if (status) { 849 if (status) {
731 SE_DEBUG(DBG_LVL_1, 850 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
732 "Retreiving Port Speed Failed\n"); 851 "BS_%d : Retreiving Port Speed Failed\n");
733 return status; 852 return status;
734 } 853 }
735 status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost)); 854 status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
@@ -746,7 +865,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
746 int rc; 865 int rc;
747 866
748 if (strlen(phba->mac_address)) 867 if (strlen(phba->mac_address))
749 return strlcpy(buf, phba->mac_address, PAGE_SIZE); 868 return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);
750 869
751 memset(&resp, 0, sizeof(resp)); 870 memset(&resp, 0, sizeof(resp));
752 rc = mgmt_get_nic_conf(phba, &resp); 871 rc = mgmt_get_nic_conf(phba, &resp);
@@ -768,8 +887,12 @@ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
768 struct iscsi_stats *stats) 887 struct iscsi_stats *stats)
769{ 888{
770 struct iscsi_conn *conn = cls_conn->dd_data; 889 struct iscsi_conn *conn = cls_conn->dd_data;
890 struct beiscsi_hba *phba = NULL;
891
892 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
893 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
894 "BS_%d : In beiscsi_conn_get_stats\n");
771 895
772 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_stats\n");
773 stats->txdata_octets = conn->txdata_octets; 896 stats->txdata_octets = conn->txdata_octets;
774 stats->rxdata_octets = conn->rxdata_octets; 897 stats->rxdata_octets = conn->rxdata_octets;
775 stats->dataout_pdus = conn->dataout_pdus_cnt; 898 stats->dataout_pdus = conn->dataout_pdus_cnt;
@@ -829,11 +952,16 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
829 struct beiscsi_endpoint *beiscsi_ep; 952 struct beiscsi_endpoint *beiscsi_ep;
830 struct beiscsi_offload_params params; 953 struct beiscsi_offload_params params;
831 954
832 SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_start\n"); 955 beiscsi_log(beiscsi_conn->phba, KERN_INFO,
956 BEISCSI_LOG_CONFIG,
957 "BS_%d : In beiscsi_conn_start\n");
958
833 memset(&params, 0, sizeof(struct beiscsi_offload_params)); 959 memset(&params, 0, sizeof(struct beiscsi_offload_params));
834 beiscsi_ep = beiscsi_conn->ep; 960 beiscsi_ep = beiscsi_conn->ep;
835 if (!beiscsi_ep) 961 if (!beiscsi_ep)
836 SE_DEBUG(DBG_LVL_1, "In beiscsi_conn_start , no beiscsi_ep\n"); 962 beiscsi_log(beiscsi_conn->phba, KERN_ERR,
963 BEISCSI_LOG_CONFIG,
964 "BS_%d : In beiscsi_conn_start , no beiscsi_ep\n");
837 965
838 beiscsi_conn->login_in_progress = 0; 966 beiscsi_conn->login_in_progress = 0;
839 beiscsi_set_params_for_offld(beiscsi_conn, &params); 967 beiscsi_set_params_for_offld(beiscsi_conn, &params);
@@ -907,19 +1035,27 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
907 unsigned int tag, wrb_num; 1035 unsigned int tag, wrb_num;
908 int ret = -ENOMEM; 1036 int ret = -ENOMEM;
909 1037
910 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn\n"); 1038 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1039 "BS_%d : In beiscsi_open_conn\n");
1040
911 beiscsi_ep->ep_cid = beiscsi_get_cid(phba); 1041 beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
912 if (beiscsi_ep->ep_cid == 0xFFFF) { 1042 if (beiscsi_ep->ep_cid == 0xFFFF) {
913 SE_DEBUG(DBG_LVL_1, "No free cid available\n"); 1043 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1044 "BS_%d : No free cid available\n");
914 return ret; 1045 return ret;
915 } 1046 }
916 SE_DEBUG(DBG_LVL_8, "In beiscsi_open_conn, ep_cid=%d\n", 1047
917 beiscsi_ep->ep_cid); 1048 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1049 "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
1050 beiscsi_ep->ep_cid);
1051
918 phba->ep_array[beiscsi_ep->ep_cid - 1052 phba->ep_array[beiscsi_ep->ep_cid -
919 phba->fw_config.iscsi_cid_start] = ep; 1053 phba->fw_config.iscsi_cid_start] = ep;
920 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + 1054 if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
921 phba->params.cxns_per_ctrl * 2)) { 1055 phba->params.cxns_per_ctrl * 2)) {
922 SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n"); 1056
1057 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1058 "BS_%d : Failed in allocate iscsi cid\n");
923 goto free_ep; 1059 goto free_ep;
924 } 1060 }
925 1061
@@ -928,9 +1064,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
928 sizeof(struct tcp_connect_and_offload_in), 1064 sizeof(struct tcp_connect_and_offload_in),
929 &nonemb_cmd.dma); 1065 &nonemb_cmd.dma);
930 if (nonemb_cmd.va == NULL) { 1066 if (nonemb_cmd.va == NULL) {
931 SE_DEBUG(DBG_LVL_1, 1067
932 "Failed to allocate memory for mgmt_open_connection" 1068 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
933 "\n"); 1069 "BS_%d : Failed to allocate memory for"
1070 " mgmt_open_connection\n");
1071
934 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 1072 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
935 return -ENOMEM; 1073 return -ENOMEM;
936 } 1074 }
@@ -938,9 +1076,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
938 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 1076 memset(nonemb_cmd.va, 0, nonemb_cmd.size);
939 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); 1077 tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
940 if (!tag) { 1078 if (!tag) {
941 SE_DEBUG(DBG_LVL_1, 1079 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
942 "mgmt_open_connection Failed for cid=%d\n", 1080 "BS_%d : mgmt_open_connection Failed for cid=%d\n",
943 beiscsi_ep->ep_cid); 1081 beiscsi_ep->ep_cid);
1082
944 beiscsi_put_cid(phba, beiscsi_ep->ep_cid); 1083 beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
945 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1084 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
946 nonemb_cmd.va, nonemb_cmd.dma); 1085 nonemb_cmd.va, nonemb_cmd.dma);
@@ -953,9 +1092,12 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
953 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 1092 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
954 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 1093 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
955 if (status || extd_status) { 1094 if (status || extd_status) {
956 SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed" 1095 beiscsi_log(phba, KERN_ERR,
957 " status = %d extd_status = %d\n", 1096 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
958 status, extd_status); 1097 "BS_%d : mgmt_open_connection Failed"
1098 " status = %d extd_status = %d\n",
1099 status, extd_status);
1100
959 free_mcc_tag(&phba->ctrl, tag); 1101 free_mcc_tag(&phba->ctrl, tag);
960 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1102 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
961 nonemb_cmd.va, nonemb_cmd.dma); 1103 nonemb_cmd.va, nonemb_cmd.dma);
@@ -968,7 +1110,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
968 beiscsi_ep = ep->dd_data; 1110 beiscsi_ep = ep->dd_data;
969 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; 1111 beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
970 beiscsi_ep->cid_vld = 1; 1112 beiscsi_ep->cid_vld = 1;
971 SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n"); 1113 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1114 "BS_%d : mgmt_open_connection Success\n");
972 } 1115 }
973 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 1116 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
974 nonemb_cmd.va, nonemb_cmd.dma); 1117 nonemb_cmd.va, nonemb_cmd.dma);
@@ -996,18 +1139,19 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
996 struct iscsi_endpoint *ep; 1139 struct iscsi_endpoint *ep;
997 int ret; 1140 int ret;
998 1141
999 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_connect\n");
1000 if (shost) 1142 if (shost)
1001 phba = iscsi_host_priv(shost); 1143 phba = iscsi_host_priv(shost);
1002 else { 1144 else {
1003 ret = -ENXIO; 1145 ret = -ENXIO;
1004 SE_DEBUG(DBG_LVL_1, "shost is NULL\n"); 1146 printk(KERN_ERR
1147 "beiscsi_ep_connect shost is NULL\n");
1005 return ERR_PTR(ret); 1148 return ERR_PTR(ret);
1006 } 1149 }
1007 1150
1008 if (phba->state != BE_ADAPTER_UP) { 1151 if (phba->state != BE_ADAPTER_UP) {
1009 ret = -EBUSY; 1152 ret = -EBUSY;
1010 SE_DEBUG(DBG_LVL_1, "The Adapter state is Not UP\n"); 1153 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1154 "BS_%d : The Adapter state is Not UP\n");
1011 return ERR_PTR(ret); 1155 return ERR_PTR(ret);
1012 } 1156 }
1013 1157
@@ -1022,7 +1166,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1022 beiscsi_ep->openiscsi_ep = ep; 1166 beiscsi_ep->openiscsi_ep = ep;
1023 ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking); 1167 ret = beiscsi_open_conn(ep, NULL, dst_addr, non_blocking);
1024 if (ret) { 1168 if (ret) {
1025 SE_DEBUG(DBG_LVL_1, "Failed in beiscsi_open_conn\n"); 1169 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1170 "BS_%d : Failed in beiscsi_open_conn\n");
1026 goto free_ep; 1171 goto free_ep;
1027 } 1172 }
1028 1173
@@ -1044,7 +1189,9 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1044{ 1189{
1045 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 1190 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
1046 1191
1047 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_poll\n"); 1192 beiscsi_log(beiscsi_ep->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1193 "BS_%d : In beiscsi_ep_poll\n");
1194
1048 if (beiscsi_ep->cid_vld == 1) 1195 if (beiscsi_ep->cid_vld == 1)
1049 return 1; 1196 return 1;
1050 else 1197 else
@@ -1064,8 +1211,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
1064 1211
1065 tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag); 1212 tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
1066 if (!tag) { 1213 if (!tag) {
1067 SE_DEBUG(DBG_LVL_8, "upload failed for cid 0x%x\n", 1214 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1068 beiscsi_ep->ep_cid); 1215 "BS_%d : upload failed for cid 0x%x\n",
1216 beiscsi_ep->ep_cid);
1217
1069 ret = -EAGAIN; 1218 ret = -EAGAIN;
1070 } else { 1219 } else {
1071 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 1220 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -1086,7 +1235,8 @@ static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
1086 if (phba->conn_table[cid]) 1235 if (phba->conn_table[cid])
1087 phba->conn_table[cid] = NULL; 1236 phba->conn_table[cid] = NULL;
1088 else { 1237 else {
1089 SE_DEBUG(DBG_LVL_8, "Connection table Not occupied.\n"); 1238 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1239 "BS_%d : Connection table Not occupied.\n");
1090 return -EINVAL; 1240 return -EINVAL;
1091 } 1241 }
1092 return 0; 1242 return 0;
@@ -1104,38 +1254,40 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
1104 struct beiscsi_endpoint *beiscsi_ep; 1254 struct beiscsi_endpoint *beiscsi_ep;
1105 struct beiscsi_hba *phba; 1255 struct beiscsi_hba *phba;
1106 unsigned int tag; 1256 unsigned int tag;
1257 uint8_t mgmt_invalidate_flag, tcp_upload_flag;
1107 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH; 1258 unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
1108 1259
1109 beiscsi_ep = ep->dd_data; 1260 beiscsi_ep = ep->dd_data;
1110 phba = beiscsi_ep->phba; 1261 phba = beiscsi_ep->phba;
1111 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect for ep_cid = %d\n", 1262 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1112 beiscsi_ep->ep_cid); 1263 "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
1113 1264 beiscsi_ep->ep_cid);
1114 if (!beiscsi_ep->conn) { 1265
1115 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect, no " 1266 if (beiscsi_ep->conn) {
1116 "beiscsi_ep\n"); 1267 beiscsi_conn = beiscsi_ep->conn;
1117 return; 1268 iscsi_suspend_queue(beiscsi_conn->conn);
1269 mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
1270 tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
1271 } else {
1272 mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
1273 tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
1118 } 1274 }
1119 beiscsi_conn = beiscsi_ep->conn;
1120 iscsi_suspend_queue(beiscsi_conn->conn);
1121
1122 SE_DEBUG(DBG_LVL_8, "In beiscsi_ep_disconnect ep_cid = %d\n",
1123 beiscsi_ep->ep_cid);
1124 1275
1125 tag = mgmt_invalidate_connection(phba, beiscsi_ep, 1276 tag = mgmt_invalidate_connection(phba, beiscsi_ep,
1126 beiscsi_ep->ep_cid, 1, 1277 beiscsi_ep->ep_cid,
1127 savecfg_flag); 1278 mgmt_invalidate_flag,
1279 savecfg_flag);
1128 if (!tag) { 1280 if (!tag) {
1129 SE_DEBUG(DBG_LVL_1, 1281 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1130 "mgmt_invalidate_connection Failed for cid=%d\n", 1282 "BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
1131 beiscsi_ep->ep_cid); 1283 beiscsi_ep->ep_cid);
1132 } else { 1284 } else {
1133 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 1285 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
1134 phba->ctrl.mcc_numtag[tag]); 1286 phba->ctrl.mcc_numtag[tag]);
1135 free_mcc_tag(&phba->ctrl, tag); 1287 free_mcc_tag(&phba->ctrl, tag);
1136 } 1288 }
1137 1289
1138 beiscsi_close_conn(beiscsi_ep, CONNECTION_UPLOAD_GRACEFUL); 1290 beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
1139 beiscsi_free_ep(beiscsi_ep); 1291 beiscsi_free_ep(beiscsi_ep);
1140 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 1292 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
1141 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); 1293 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
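
The beiscsi_ep_disconnect() rework above stops assuming the endpoint always carries a bound connection. Condensed, the two paths it now selects between (a restatement of the hunk, not new logic):

	if (beiscsi_ep->conn) {
		/* bound connection: quiesce I/O, allow a TCP RST on
		 * invalidate, then upload gracefully */
		mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
		tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
	} else {
		/* endpoint never bound (e.g. failed login): invalidate
		 * without RST and upload abortively */
		mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
		tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
	}

Either way the flags then feed mgmt_invalidate_connection() and beiscsi_close_conn(), replacing the old hard-coded graceful upload.
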
@@ -1152,6 +1304,9 @@ umode_t be2iscsi_attr_is_visible(int param_type, int param)
1152 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 1304 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
1153 case ISCSI_NET_PARAM_IPV4_GW: 1305 case ISCSI_NET_PARAM_IPV4_GW:
1154 case ISCSI_NET_PARAM_IPV6_ADDR: 1306 case ISCSI_NET_PARAM_IPV6_ADDR:
1307 case ISCSI_NET_PARAM_VLAN_ID:
1308 case ISCSI_NET_PARAM_VLAN_PRIORITY:
1309 case ISCSI_NET_PARAM_VLAN_ENABLED:
1155 return S_IRUGO; 1310 return S_IRUGO;
1156 default: 1311 default:
1157 return 0; 1312 return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0b1d99c99fd..ff73f9500b0 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -42,6 +42,7 @@
42#include "be_main.h" 42#include "be_main.h"
43#include "be_iscsi.h" 43#include "be_iscsi.h"
44#include "be_mgmt.h" 44#include "be_mgmt.h"
45#include "be_cmds.h"
45 46
46static unsigned int be_iopoll_budget = 10; 47static unsigned int be_iopoll_budget = 10;
47static unsigned int be_max_phys_size = 64; 48static unsigned int be_max_phys_size = 64;
@@ -57,9 +58,105 @@ MODULE_LICENSE("GPL");
57module_param(be_iopoll_budget, int, 0); 58module_param(be_iopoll_budget, int, 0);
58module_param(enable_msix, int, 0); 59module_param(enable_msix, int, 0);
59module_param(be_max_phys_size, uint, S_IRUGO); 60module_param(be_max_phys_size, uint, S_IRUGO);
60MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically" 61MODULE_PARM_DESC(be_max_phys_size,
61 "contiguous memory that can be allocated." 62 "Maximum Size (In Kilobytes) of physically contiguous "
62 "Range is 16 - 128"); 63 "memory that can be allocated. Range is 16 - 128");
64
65#define beiscsi_disp_param(_name)\
66ssize_t \
67beiscsi_##_name##_disp(struct device *dev,\
68 struct device_attribute *attrib, char *buf) \
69{ \
70 struct Scsi_Host *shost = class_to_shost(dev);\
71 struct beiscsi_hba *phba = iscsi_host_priv(shost); \
72 uint32_t param_val = 0; \
73 param_val = phba->attr_##_name;\
74 return snprintf(buf, PAGE_SIZE, "%d\n",\
75 phba->attr_##_name);\
76}
77
78#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
79int \
80beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
81{\
82 if (val >= _minval && val <= _maxval) {\
83 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
84 "BA_%d : beiscsi_"#_name" updated "\
85 "from 0x%x ==> 0x%x\n",\
86 phba->attr_##_name, val); \
87 phba->attr_##_name = val;\
88 return 0;\
89 } \
90 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
91 "BA_%d beiscsi_"#_name" attribute "\
92 "cannot be updated to 0x%x, "\
93 "range allowed is ["#_minval" - "#_maxval"]\n", val);\
94 return -EINVAL;\
95}
96
97#define beiscsi_store_param(_name) \
98ssize_t \
99beiscsi_##_name##_store(struct device *dev,\
100 struct device_attribute *attr, const char *buf,\
101 size_t count) \
102{ \
103 struct Scsi_Host *shost = class_to_shost(dev);\
104 struct beiscsi_hba *phba = iscsi_host_priv(shost);\
105 uint32_t param_val = 0;\
106 if (!isdigit(buf[0]))\
107 return -EINVAL;\
108 if (sscanf(buf, "%i", &param_val) != 1)\
109 return -EINVAL;\
110 if (beiscsi_##_name##_change(phba, param_val) == 0) \
111 return strlen(buf);\
112 else \
113 return -EINVAL;\
114}
115
116#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
117int \
118beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
119{ \
120 if (val >= _minval && val <= _maxval) {\
121 phba->attr_##_name = val;\
122 return 0;\
123 } \
124 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
125 "BA_%d beiscsi_"#_name" attribute " \
126 "cannot be updated to 0x%x, "\
127 "range allowed is ["#_minval" - "#_maxval"]\n", val);\
128 phba->attr_##_name = _defval;\
129 return -EINVAL;\
130}
131
132#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
133static uint beiscsi_##_name = _defval;\
134module_param(beiscsi_##_name, uint, S_IRUGO);\
135MODULE_PARM_DESC(beiscsi_##_name, _descp);\
136beiscsi_disp_param(_name)\
137beiscsi_change_param(_name, _minval, _maxval, _defval)\
138beiscsi_store_param(_name)\
139beiscsi_init_param(_name, _minval, _maxval, _defval)\
140DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
141 beiscsi_##_name##_disp, beiscsi_##_name##_store)
142
143/*
144 * When a new log level is added, update
145 * the MAX allowed value for log_enable
146 */
147BEISCSI_RW_ATTR(log_enable, 0x00,
148 0xFF, 0x00, "Enable logging Bit Mask\n"
149 "\t\t\t\tInitialization Events : 0x01\n"
150 "\t\t\t\tMailbox Events : 0x02\n"
151 "\t\t\t\tMiscellaneous Events : 0x04\n"
152 "\t\t\t\tError Handling : 0x08\n"
153 "\t\t\t\tIO Path Events : 0x10\n"
154 "\t\t\t\tConfiguration Path : 0x20\n");
155
156struct device_attribute *beiscsi_attrs[] = {
157 &dev_attr_beiscsi_log_enable,
158 NULL,
159};
63 160
64static int beiscsi_slave_configure(struct scsi_device *sdev) 161static int beiscsi_slave_configure(struct scsi_device *sdev)
65{ 162{
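
All of the beiscsi_log() call sites introduced by this patch are gated by the attr_log_enable mask the BEISCSI_RW_ATTR block above maintains. The macro itself lives in be_main.h, not in this hunk; a sketch of the gating it presumably performs, consistent with how it is invoked throughout the patch (the unconditional pass-through for KERN_WARNING and more severe levels is an assumption):

	/* Assumed shape of beiscsi_log() from be_main.h: print when the
	 * subsystem bit is set in beiscsi_log_enable, or always for
	 * KERN_WARNING ("<4>") and more severe levels. */
	#define beiscsi_log(phba, level, mask, fmt, arg...)		\
	do {								\
		uint32_t log_value = phba->attr_log_enable;		\
		if (((mask) & log_value) || (level[1] <= '4'))		\
			shost_printk(level, phba->shost,		\
				     fmt, ##arg);			\
	} while (0)

The mask can then be tuned per host at runtime, e.g. writing 0x21 to /sys/class/scsi_host/hostN/beiscsi_log_enable would enable the initialization (0x01) and configuration (0x20) messages.
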
@@ -112,9 +209,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
112 sizeof(struct invalidate_commands_params_in), 209 sizeof(struct invalidate_commands_params_in),
113 &nonemb_cmd.dma); 210 &nonemb_cmd.dma);
114 if (nonemb_cmd.va == NULL) { 211 if (nonemb_cmd.va == NULL) {
115 SE_DEBUG(DBG_LVL_1, 212 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
116 "Failed to allocate memory for" 213 "BM_%d : Failed to allocate memory for"
117 "mgmt_invalidate_icds\n"); 214 "mgmt_invalidate_icds\n");
118 return FAILED; 215 return FAILED;
119 } 216 }
120 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 217 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
@@ -122,9 +219,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
122 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 219 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
123 cid, &nonemb_cmd); 220 cid, &nonemb_cmd);
124 if (!tag) { 221 if (!tag) {
125 shost_printk(KERN_WARNING, phba->shost, 222 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
126 "mgmt_invalidate_icds could not be" 223 "BM_%d : mgmt_invalidate_icds could not be"
127 " submitted\n"); 224 "submitted\n");
128 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 225 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
129 nonemb_cmd.va, nonemb_cmd.dma); 226 nonemb_cmd.va, nonemb_cmd.dma);
130 227
@@ -188,9 +285,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
188 sizeof(struct invalidate_commands_params_in), 285 sizeof(struct invalidate_commands_params_in),
189 &nonemb_cmd.dma); 286 &nonemb_cmd.dma);
190 if (nonemb_cmd.va == NULL) { 287 if (nonemb_cmd.va == NULL) {
191 SE_DEBUG(DBG_LVL_1, 288 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
192 "Failed to allocate memory for" 289 "BM_%d : Failed to allocate memory for"
193 "mgmt_invalidate_icds\n"); 290 "mgmt_invalidate_icds\n");
194 return FAILED; 291 return FAILED;
195 } 292 }
196 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 293 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
@@ -198,9 +295,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
198 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 295 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
199 cid, &nonemb_cmd); 296 cid, &nonemb_cmd);
200 if (!tag) { 297 if (!tag) {
201 shost_printk(KERN_WARNING, phba->shost, 298 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
202 "mgmt_invalidate_icds could not be" 299 "BM_%d : mgmt_invalidate_icds could not be"
203 " submitted\n"); 300 " submitted\n");
204 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 301 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
205 nonemb_cmd.va, nonemb_cmd.dma); 302 nonemb_cmd.va, nonemb_cmd.dma);
206 return FAILED; 303 return FAILED;
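Every SE_DEBUG()/shost_printk() conversion in the hunks below funnels into the beiscsi_log() macro that the same series adds to be_main.h (that hunk is not shown here). A plausible minimal sketch, assuming the message fires when its mask intersects the per-host attr_log_enable bitmap or the severity is KERN_ERR or worse, and that the leading BM_%d/BA_%d is filled with __LINE__ inside the macro, is:

/* Assumed shape of the be_main.h helper; check the real hunk before use. */
#define beiscsi_log(phba, level, mask, fmt, arg...)			\
do {									\
	uint32_t log_value = phba->attr_log_enable;			\
	if (((mask) & log_value) || (level[1] <= '3')) /* '3' == KERN_ERR */ \
		shost_printk(level, phba->shost,			\
			     fmt, __LINE__, ##arg);			\
} while (0)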
@@ -389,6 +486,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
389}; 486};
390MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 487MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
391 488
489
392static struct scsi_host_template beiscsi_sht = { 490static struct scsi_host_template beiscsi_sht = {
393 .module = THIS_MODULE, 491 .module = THIS_MODULE,
394 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 492 .name = "Emulex 10Gbe open-iscsi Initiator Driver",
@@ -400,6 +498,7 @@ static struct scsi_host_template beiscsi_sht = {
400 .eh_abort_handler = beiscsi_eh_abort, 498 .eh_abort_handler = beiscsi_eh_abort,
401 .eh_device_reset_handler = beiscsi_eh_device_reset, 499 .eh_device_reset_handler = beiscsi_eh_device_reset,
402 .eh_target_reset_handler = iscsi_eh_session_reset, 500 .eh_target_reset_handler = iscsi_eh_session_reset,
501 .shost_attrs = beiscsi_attrs,
403 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 502 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
404 .can_queue = BE2_IO_DEPTH, 503 .can_queue = BE2_IO_DEPTH,
405 .this_id = -1, 504 .this_id = -1,
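With .shost_attrs wired into the template, the DEVICE_ATTR() created by BEISCSI_RW_ATTR() surfaces under the usual scsi_host sysfs tree, so the mask is tunable at runtime, e.g. echo 0x19 > /sys/class/scsi_host/host0/beiscsi_log_enable to combine the 0x01, 0x08 and 0x10 classes (host number illustrative).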
@@ -419,8 +518,8 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
419 518
420 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 519 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
421 if (!shost) { 520 if (!shost) {
422 dev_err(&pcidev->dev, "beiscsi_hba_alloc -" 521 dev_err(&pcidev->dev,
423 "iscsi_host_alloc failed\n"); 522 "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
424 return NULL; 523 return NULL;
425 } 524 }
426 shost->dma_boundary = pcidev->dma_mask; 525 shost->dma_boundary = pcidev->dma_mask;
@@ -510,8 +609,8 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
510 609
511 ret = pci_enable_device(pcidev); 610 ret = pci_enable_device(pcidev);
512 if (ret) { 611 if (ret) {
513 dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device " 612 dev_err(&pcidev->dev,
514 "failed. Returning -ENODEV\n"); 613 "beiscsi_enable_pci - enable device failed\n");
515 return ret; 614 return ret;
516 } 615 }
517 616
@@ -576,8 +675,9 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
576 + BE2_TMFS) / 512) + 1) * 512; 675 + BE2_TMFS) / 512) + 1) * 512;
577 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 676 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
578 ? 1024 : phba->params.num_eq_entries; 677 ? 1024 : phba->params.num_eq_entries;
579 SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n", 678 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
580 phba->params.num_eq_entries); 679 "BM_%d : phba->params.num_eq_entries=%d\n",
680 phba->params.num_eq_entries);
581 phba->params.num_cq_entries = 681 phba->params.num_cq_entries =
582 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 682 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
583 + BE2_TMFS) / 512) + 1) * 512; 683 + BE2_TMFS) / 512) + 1) * 512;
@@ -621,8 +721,6 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
621 phba = pbe_eq->phba; 721 phba = pbe_eq->phba;
622 mcc = &phba->ctrl.mcc_obj.cq; 722 mcc = &phba->ctrl.mcc_obj.cq;
623 eqe = queue_tail_node(eq); 723 eqe = queue_tail_node(eq);
624 if (!eqe)
625 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
626 724
627 num_eq_processed = 0; 725 num_eq_processed = 0;
628 726
@@ -667,8 +765,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
667 eq = &pbe_eq->q; 765 eq = &pbe_eq->q;
668 cq = pbe_eq->cq; 766 cq = pbe_eq->cq;
669 eqe = queue_tail_node(eq); 767 eqe = queue_tail_node(eq);
670 if (!eqe)
671 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
672 768
673 phba = pbe_eq->phba; 769 phba = pbe_eq->phba;
674 num_eq_processed = 0; 770 num_eq_processed = 0;
@@ -743,8 +839,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
743 mcc = &phba->ctrl.mcc_obj.cq; 839 mcc = &phba->ctrl.mcc_obj.cq;
744 index = 0; 840 index = 0;
745 eqe = queue_tail_node(eq); 841 eqe = queue_tail_node(eq);
746 if (!eqe)
747 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
748 842
749 num_ioeq_processed = 0; 843 num_ioeq_processed = 0;
750 num_mcceq_processed = 0; 844 num_mcceq_processed = 0;
@@ -842,9 +936,10 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
842 phba->msi_name[i], 936 phba->msi_name[i],
843 &phwi_context->be_eq[i]); 937 &phwi_context->be_eq[i]);
844 if (ret) { 938 if (ret) {
845 shost_printk(KERN_ERR, phba->shost, 939 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
846 "beiscsi_init_irqs-Failed to" 940 "BM_%d : beiscsi_init_irqs-Failed to"
847 "register msix for i = %d\n", i); 941 "register msix for i = %d\n",
942 i);
848 kfree(phba->msi_name[i]); 943 kfree(phba->msi_name[i]);
849 goto free_msix_irqs; 944 goto free_msix_irqs;
850 } 945 }
@@ -860,8 +955,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
860 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], 955 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
861 &phwi_context->be_eq[i]); 956 &phwi_context->be_eq[i]);
862 if (ret) { 957 if (ret) {
863 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" 958 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
864 "Failed to register beiscsi_msix_mcc\n"); 959 "BM_%d : beiscsi_init_irqs-"
960 "Failed to register beiscsi_msix_mcc\n");
865 kfree(phba->msi_name[i]); 961 kfree(phba->msi_name[i]);
866 goto free_msix_irqs; 962 goto free_msix_irqs;
867 } 963 }
@@ -870,8 +966,9 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
870 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 966 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
871 "beiscsi", phba); 967 "beiscsi", phba);
872 if (ret) { 968 if (ret) {
873 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" 969 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
874 "Failed to register irq\\n"); 970 "BM_%d : beiscsi_init_irqs-"
971 "Failed to register irq\\n");
875 return ret; 972 return ret;
876 } 973 }
877 } 974 }
@@ -922,7 +1019,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
922 case ISCSI_OP_REJECT: 1019 case ISCSI_OP_REJECT:
923 WARN_ON(!pbuffer); 1020 WARN_ON(!pbuffer);
924 WARN_ON(!(buf_len == 48)); 1021 WARN_ON(!(buf_len == 48));
925 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); 1022 beiscsi_log(phba, KERN_ERR,
1023 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1024 "BM_%d : In ISCSI_OP_REJECT\n");
926 break; 1025 break;
927 case ISCSI_OP_LOGIN_RSP: 1026 case ISCSI_OP_LOGIN_RSP:
928 case ISCSI_OP_TEXT_RSP: 1027 case ISCSI_OP_TEXT_RSP:
@@ -932,11 +1031,12 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
932 login_hdr->itt = io_task->libiscsi_itt; 1031 login_hdr->itt = io_task->libiscsi_itt;
933 break; 1032 break;
934 default: 1033 default:
935 shost_printk(KERN_WARNING, phba->shost, 1034 beiscsi_log(phba, KERN_WARNING,
936 "Unrecognized opcode 0x%x in async msg\n", 1035 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
937 (ppdu-> 1036 "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1037 (ppdu->
938 dw[offsetof(struct amap_pdu_base, opcode) / 32] 1038 dw[offsetof(struct amap_pdu_base, opcode) / 32]
939 & PDUBASE_OPCODE_MASK)); 1039 & PDUBASE_OPCODE_MASK));
940 return 1; 1040 return 1;
941 } 1041 }
942 1042
@@ -951,9 +1051,11 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
951 struct sgl_handle *psgl_handle; 1051 struct sgl_handle *psgl_handle;
952 1052
953 if (phba->io_sgl_hndl_avbl) { 1053 if (phba->io_sgl_hndl_avbl) {
954 SE_DEBUG(DBG_LVL_8, 1054 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
955 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n", 1055 "BM_%d : In alloc_io_sgl_handle,"
956 phba->io_sgl_alloc_index); 1056 " io_sgl_alloc_index=%d\n",
1057 phba->io_sgl_alloc_index);
1058
957 psgl_handle = phba->io_sgl_hndl_base[phba-> 1059 psgl_handle = phba->io_sgl_hndl_base[phba->
958 io_sgl_alloc_index]; 1060 io_sgl_alloc_index];
959 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 1061 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
@@ -971,17 +1073,20 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
971static void 1073static void
972free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1074free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
973{ 1075{
974 SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n", 1076 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
975 phba->io_sgl_free_index); 1077 "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
1078 phba->io_sgl_free_index);
1079
976 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 1080 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
977 /* 1081 /*
978 * this can happen if clean_task is called on a task that 1082 * this can happen if clean_task is called on a task that
979 * failed in xmit_task or alloc_pdu. 1083 * failed in xmit_task or alloc_pdu.
980 */ 1084 */
981 SE_DEBUG(DBG_LVL_8, 1085 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
982 "Double Free in IO SGL io_sgl_free_index=%d," 1086 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
983 "value there=%p\n", phba->io_sgl_free_index, 1087 "value there=%p\n", phba->io_sgl_free_index,
984 phba->io_sgl_hndl_base[phba->io_sgl_free_index]); 1088 phba->io_sgl_hndl_base
1089 [phba->io_sgl_free_index]);
985 return; 1090 return;
986 } 1091 }
987 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 1092 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1043,11 +1148,12 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1043 else 1148 else
1044 pwrb_context->free_index++; 1149 pwrb_context->free_index++;
1045 1150
1046 SE_DEBUG(DBG_LVL_8, 1151 beiscsi_log(phba, KERN_INFO,
1047 "FREE WRB: pwrb_handle=%p free_index=0x%x" 1152 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1048 "wrb_handles_available=%d\n", 1153 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
1049 pwrb_handle, pwrb_context->free_index, 1154 " wrb_handles_available=%d\n",
1050 pwrb_context->wrb_handles_available); 1155 pwrb_handle, pwrb_context->free_index,
1156 pwrb_context->wrb_handles_available);
1051} 1157}
1052 1158
1053static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1159static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
@@ -1057,8 +1163,11 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1057 if (phba->eh_sgl_hndl_avbl) { 1163 if (phba->eh_sgl_hndl_avbl) {
1058 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1164 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1059 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1165 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1060 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n", 1166 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1061 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index); 1167 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1168 phba->eh_sgl_alloc_index,
1169 phba->eh_sgl_alloc_index);
1170
1062 phba->eh_sgl_hndl_avbl--; 1171 phba->eh_sgl_hndl_avbl--;
1063 if (phba->eh_sgl_alloc_index == 1172 if (phba->eh_sgl_alloc_index ==
1064 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1173 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
@@ -1075,16 +1184,20 @@ void
1075free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1184free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1076{ 1185{
1077 1186
1078 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d\n", 1187 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1079 phba->eh_sgl_free_index); 1188 "BM_%d : In free_mgmt_sgl_handle,"
1189 "eh_sgl_free_index=%d\n",
1190 phba->eh_sgl_free_index);
1191
1080 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1192 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1081 /* 1193 /*
1082 * this can happen if clean_task is called on a task that 1194 * this can happen if clean_task is called on a task that
1083 * failed in xmit_task or alloc_pdu. 1195 * failed in xmit_task or alloc_pdu.
1084 */ 1196 */
1085 SE_DEBUG(DBG_LVL_8, 1197 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1086 "Double Free in eh SGL ,eh_sgl_free_index=%d\n", 1198 "BM_%d : Double Free in eh SGL ,"
1087 phba->eh_sgl_free_index); 1199 "eh_sgl_free_index=%d\n",
1200 phba->eh_sgl_free_index);
1088 return; 1201 return;
1089 } 1202 }
1090 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1203 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1326,9 +1439,10 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1326 break; 1439 break;
1327 1440
1328 case HWH_TYPE_LOGIN: 1441 case HWH_TYPE_LOGIN:
1329 SE_DEBUG(DBG_LVL_1, 1442 beiscsi_log(phba, KERN_ERR,
1330 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd" 1443 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1331 "- Solicited path\n"); 1444 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1445 " hwi_complete_cmd- Solicited path\n");
1332 break; 1446 break;
1333 1447
1334 case HWH_TYPE_NOP: 1448 case HWH_TYPE_NOP:
@@ -1336,13 +1450,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1336 break; 1450 break;
1337 1451
1338 default: 1452 default:
1339 shost_printk(KERN_WARNING, phba->shost, 1453 beiscsi_log(phba, KERN_WARNING,
1340 "In hwi_complete_cmd, unknown type = %d" 1454 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1341 "wrb_index 0x%x CID 0x%x\n", type, 1455 "BM_%d : In hwi_complete_cmd, unknown type = %d"
1342 ((psol->dw[offsetof(struct amap_iscsi_wrb, 1456 "wrb_index 0x%x CID 0x%x\n", type,
1343 type) / 32] & SOL_WRB_INDEX_MASK) >> 16), 1457 ((psol->dw[offsetof(struct amap_iscsi_wrb,
1344 ((psol->dw[offsetof(struct amap_sol_cqe, 1458 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1345 cid) / 32] & SOL_CID_MASK) >> 6)); 1459 ((psol->dw[offsetof(struct amap_sol_cqe,
1460 cid) / 32] & SOL_CID_MASK) >> 6));
1346 break; 1461 break;
1347 } 1462 }
1348 1463
@@ -1397,10 +1512,11 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1397 break; 1512 break;
1398 default: 1513 default:
1399 pbusy_list = NULL; 1514 pbusy_list = NULL;
1400 shost_printk(KERN_WARNING, phba->shost, 1515 beiscsi_log(phba, KERN_WARNING,
1401 "Unexpected code=%d\n", 1516 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1402 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1517 "BM_%d : Unexpected code=%d\n",
1403 code) / 32] & PDUCQE_CODE_MASK); 1518 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1519 code) / 32] & PDUCQE_CODE_MASK);
1404 return NULL; 1520 return NULL;
1405 } 1521 }
1406 1522
@@ -1425,8 +1541,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
1425} 1541}
1426 1542
1427static unsigned int 1543static unsigned int
1428hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx, 1544hwi_update_async_writables(struct beiscsi_hba *phba,
1429 unsigned int is_header, unsigned int cq_index) 1545 struct hwi_async_pdu_context *pasync_ctx,
1546 unsigned int is_header, unsigned int cq_index)
1430{ 1547{
1431 struct list_head *pbusy_list; 1548 struct list_head *pbusy_list;
1432 struct async_pdu_handle *pasync_handle; 1549 struct async_pdu_handle *pasync_handle;
@@ -1463,9 +1580,10 @@ hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1463 } 1580 }
1464 1581
1465 if (!writables) { 1582 if (!writables) {
1466 SE_DEBUG(DBG_LVL_1, 1583 beiscsi_log(phba, KERN_ERR,
1467 "Duplicate notification received - index 0x%x!!\n", 1584 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1468 cq_index); 1585 "BM_%d : Duplicate notification received - index 0x%x!!\n",
1586 cq_index);
1469 WARN_ON(1); 1587 WARN_ON(1);
1470 } 1588 }
1471 1589
@@ -1616,8 +1734,8 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1616 pdpdu_cqe, &cq_index); 1734 pdpdu_cqe, &cq_index);
1617 BUG_ON(pasync_handle->is_header != 0); 1735 BUG_ON(pasync_handle->is_header != 0);
1618 if (pasync_handle->consumed == 0) 1736 if (pasync_handle->consumed == 0)
1619 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header, 1737 hwi_update_async_writables(phba, pasync_ctx,
1620 cq_index); 1738 pasync_handle->is_header, cq_index);
1621 1739
1622 hwi_free_async_msg(phba, pasync_handle->cri); 1740 hwi_free_async_msg(phba, pasync_handle->cri);
1623 hwi_post_async_buffers(phba, pasync_handle->is_header); 1741 hwi_post_async_buffers(phba, pasync_handle->is_header);
@@ -1745,8 +1863,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1745 pdpdu_cqe, &cq_index); 1863 pdpdu_cqe, &cq_index);
1746 1864
1747 if (pasync_handle->consumed == 0) 1865 if (pasync_handle->consumed == 0)
1748 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header, 1866 hwi_update_async_writables(phba, pasync_ctx,
1749 cq_index); 1867 pasync_handle->is_header, cq_index);
1868
1750 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 1869 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1751 hwi_post_async_buffers(phba, pasync_handle->is_header); 1870 hwi_post_async_buffers(phba, pasync_handle->is_header);
1752} 1871}
@@ -1774,9 +1893,10 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1774 beiscsi_async_link_state_process(phba, 1893 beiscsi_async_link_state_process(phba,
1775 (struct be_async_event_link_state *) mcc_compl); 1894 (struct be_async_event_link_state *) mcc_compl);
1776 else 1895 else
1777 SE_DEBUG(DBG_LVL_1, 1896 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
1778 " Unsupported Async Event, flags" 1897 "BM_%d : Unsupported Async Event, flags"
1779 " = 0x%08x\n", mcc_compl->flags); 1898 " = 0x%08x\n",
1899 mcc_compl->flags);
1780 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1900 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1781 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 1901 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1782 atomic_dec(&phba->ctrl.mcc_obj.q.used); 1902 atomic_dec(&phba->ctrl.mcc_obj.q.used);
@@ -1801,6 +1921,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1801 struct dmsg_cqe *dmsg; 1921 struct dmsg_cqe *dmsg;
1802 unsigned int num_processed = 0; 1922 unsigned int num_processed = 0;
1803 unsigned int tot_nump = 0; 1923 unsigned int tot_nump = 0;
1924 unsigned short code = 0, cid = 0;
1804 struct beiscsi_conn *beiscsi_conn; 1925 struct beiscsi_conn *beiscsi_conn;
1805 struct beiscsi_endpoint *beiscsi_ep; 1926 struct beiscsi_endpoint *beiscsi_ep;
1806 struct iscsi_endpoint *ep; 1927 struct iscsi_endpoint *ep;
@@ -1814,10 +1935,11 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1814 CQE_VALID_MASK) { 1935 CQE_VALID_MASK) {
1815 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1936 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1816 1937
1817 ep = phba->ep_array[(u32) ((sol-> 1938 cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
1818 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 1939 CQE_CID_MASK) >> 6);
1819 SOL_CID_MASK) >> 6) - 1940 code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
1820 phba->fw_config.iscsi_cid_start]; 1941 CQE_CODE_MASK);
1942 ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
1821 1943
1822 beiscsi_ep = ep->dd_data; 1944 beiscsi_ep = ep->dd_data;
1823 beiscsi_conn = beiscsi_ep->conn; 1945 beiscsi_conn = beiscsi_ep->conn;
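The hunk above hoists the repeated CQE field extraction into cid/code locals computed once per entry. The idiom depends on the amap convention (assumed here from its use) that each member of an amap_* struct is declared as u8 name[bit_width], so offsetof() yields a bit offset and offsetof(...)/32 selects the containing 32-bit dword; a toy version for illustration:

#include <stddef.h>
#include <stdint.h>

/* Toy layout, not the driver's: cid in bits 0..9 of dw[0]. */
struct amap_toy_cqe {
	uint8_t cid[10];
	uint8_t code[6];
};

#define TOY_CID_MASK	0x3FF	/* low 10 bits of the containing dword */

static inline uint32_t toy_get_cid(const uint32_t *dw)
{
	/* offsetof()/32 -> dword index; the mask isolates the field */
	return dw[offsetof(struct amap_toy_cqe, cid) / 32] & TOY_CID_MASK;
}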
@@ -1829,32 +1951,41 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1829 num_processed = 0; 1951 num_processed = 0;
1830 } 1952 }
1831 1953
1832 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) / 1954 switch (code) {
1833 32] & CQE_CODE_MASK) {
1834 case SOL_CMD_COMPLETE: 1955 case SOL_CMD_COMPLETE:
1835 hwi_complete_cmd(beiscsi_conn, phba, sol); 1956 hwi_complete_cmd(beiscsi_conn, phba, sol);
1836 break; 1957 break;
1837 case DRIVERMSG_NOTIFY: 1958 case DRIVERMSG_NOTIFY:
1838 SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n"); 1959 beiscsi_log(phba, KERN_INFO,
1960 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1961 "BM_%d : Received DRIVERMSG_NOTIFY\n");
1962
1839 dmsg = (struct dmsg_cqe *)sol; 1963 dmsg = (struct dmsg_cqe *)sol;
1840 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1964 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1841 break; 1965 break;
1842 case UNSOL_HDR_NOTIFY: 1966 case UNSOL_HDR_NOTIFY:
1843 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n"); 1967 beiscsi_log(phba, KERN_INFO,
1968 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1969 "BM_%d : Received UNSOL_HDR_ NOTIFY\n");
1970
1844 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1971 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1845 (struct i_t_dpdu_cqe *)sol); 1972 (struct i_t_dpdu_cqe *)sol);
1846 break; 1973 break;
1847 case UNSOL_DATA_NOTIFY: 1974 case UNSOL_DATA_NOTIFY:
1848 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n"); 1975 beiscsi_log(phba, KERN_INFO,
1976 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1977 "BM_%d : Received UNSOL_DATA_NOTIFY\n");
1978
1849 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1979 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1850 (struct i_t_dpdu_cqe *)sol); 1980 (struct i_t_dpdu_cqe *)sol);
1851 break; 1981 break;
1852 case CXN_INVALIDATE_INDEX_NOTIFY: 1982 case CXN_INVALIDATE_INDEX_NOTIFY:
1853 case CMD_INVALIDATED_NOTIFY: 1983 case CMD_INVALIDATED_NOTIFY:
1854 case CXN_INVALIDATE_NOTIFY: 1984 case CXN_INVALIDATE_NOTIFY:
1855 SE_DEBUG(DBG_LVL_1, 1985 beiscsi_log(phba, KERN_ERR,
1856 "Ignoring CQ Error notification for cmd/cxn" 1986 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1857 "invalidate\n"); 1987 "BM_%d : Ignoring CQ Error notification for"
1988 " cmd/cxn invalidate\n");
1858 break; 1989 break;
1859 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 1990 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1860 case CMD_KILLED_INVALID_STATSN_RCVD: 1991 case CMD_KILLED_INVALID_STATSN_RCVD:
@@ -1864,17 +1995,16 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1864 case CMD_CXN_KILLED_ITT_INVALID: 1995 case CMD_CXN_KILLED_ITT_INVALID:
1865 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1996 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1866 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1997 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1867 SE_DEBUG(DBG_LVL_1, 1998 beiscsi_log(phba, KERN_ERR,
1868 "CQ Error notification for cmd.. " 1999 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1869 "code %d cid 0x%x\n", 2000 "BM_%d : CQ Error notification for cmd.. "
1870 sol->dw[offsetof(struct amap_sol_cqe, code) / 2001 "code %d cid 0x%x\n", code, cid);
1871 32] & CQE_CODE_MASK,
1872 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1873 32] & SOL_CID_MASK));
1874 break; 2002 break;
1875 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 2003 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1876 SE_DEBUG(DBG_LVL_1, 2004 beiscsi_log(phba, KERN_ERR,
1877 "Digest error on def pdu ring, dropping..\n"); 2005 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2006 "BM_%d : Digest error on def pdu ring,"
2007 " dropping..\n");
1878 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2008 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1879 (struct i_t_dpdu_cqe *) sol); 2009 (struct i_t_dpdu_cqe *) sol);
1880 break; 2010 break;
@@ -1892,33 +2022,31 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1892 case CXN_KILLED_OVER_RUN_RESIDUAL: 2022 case CXN_KILLED_OVER_RUN_RESIDUAL:
1893 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2023 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1894 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2024 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1895 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID " 2025 beiscsi_log(phba, KERN_ERR,
1896 "0x%x...\n", 2026 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1897 sol->dw[offsetof(struct amap_sol_cqe, code) / 2027 "BM_%d : CQ Error %d, reset CID 0x%x...\n",
1898 32] & CQE_CODE_MASK, 2028 code, cid);
1899 (sol->dw[offsetof(struct amap_sol_cqe, cid) / 2029 if (beiscsi_conn)
1900 32] & CQE_CID_MASK)); 2030 iscsi_conn_failure(beiscsi_conn->conn,
1901 iscsi_conn_failure(beiscsi_conn->conn, 2031 ISCSI_ERR_CONN_FAILED);
1902 ISCSI_ERR_CONN_FAILED);
1903 break; 2032 break;
1904 case CXN_KILLED_RST_SENT: 2033 case CXN_KILLED_RST_SENT:
1905 case CXN_KILLED_RST_RCVD: 2034 case CXN_KILLED_RST_RCVD:
1906 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset" 2035 beiscsi_log(phba, KERN_ERR,
1907 "received/sent on CID 0x%x...\n", 2036 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1908 sol->dw[offsetof(struct amap_sol_cqe, code) / 2037 "BM_%d : CQ Error %d, reset"
1909 32] & CQE_CODE_MASK, 2038 " received/sent on CID 0x%x...\n",
1910 (sol->dw[offsetof(struct amap_sol_cqe, cid) / 2039 code, cid);
1911 32] & CQE_CID_MASK)); 2040 if (beiscsi_conn)
1912 iscsi_conn_failure(beiscsi_conn->conn, 2041 iscsi_conn_failure(beiscsi_conn->conn,
1913 ISCSI_ERR_CONN_FAILED); 2042 ISCSI_ERR_CONN_FAILED);
1914 break; 2043 break;
1915 default: 2044 default:
1916 SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d " 2045 beiscsi_log(phba, KERN_ERR,
1917 "received on CID 0x%x...\n", 2046 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1918 sol->dw[offsetof(struct amap_sol_cqe, code) / 2047 "BM_%d : CQ Error Invalid code= %d "
1919 32] & CQE_CODE_MASK, 2048 "received on CID 0x%x...\n",
1920 (sol->dw[offsetof(struct amap_sol_cqe, cid) / 2049 code, cid);
1921 32] & CQE_CID_MASK));
1922 break; 2050 break;
1923 } 2051 }
1924 2052
@@ -1977,7 +2105,10 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
1977 if (ret < budget) { 2105 if (ret < budget) {
1978 phba = pbe_eq->phba; 2106 phba = pbe_eq->phba;
1979 blk_iopoll_complete(iop); 2107 blk_iopoll_complete(iop);
1980 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id); 2108 beiscsi_log(phba, KERN_INFO,
2109 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2110 "BM_%d : rearm pbe_eq->q.id =%d\n",
2111 pbe_eq->q.id);
1981 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2112 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1982 } 2113 }
1983 return ret; 2114 return ret;
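The budget test above follows the NAPI-style contract of the old blk_iopoll API: while a poll round consumes its full budget, polling continues; once a round comes in under budget, blk_iopoll_complete() ends the polling phase and the EQ doorbell is rung via hwi_ring_eq_db() (the trailing 1, 1 arguments presumably requesting rearm) so the next completion raises an interrupt again.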
@@ -2348,16 +2479,16 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2348 kzalloc(sizeof(struct wrb_handle *) * 2479 kzalloc(sizeof(struct wrb_handle *) *
2349 phba->params.wrbs_per_cxn, GFP_KERNEL); 2480 phba->params.wrbs_per_cxn, GFP_KERNEL);
2350 if (!pwrb_context->pwrb_handle_base) { 2481 if (!pwrb_context->pwrb_handle_base) {
2351 shost_printk(KERN_ERR, phba->shost, 2482 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2352 "Mem Alloc Failed. Failing to load\n"); 2483 "BM_%d : Mem Alloc Failed. Failing to load\n");
2353 goto init_wrb_hndl_failed; 2484 goto init_wrb_hndl_failed;
2354 } 2485 }
2355 pwrb_context->pwrb_handle_basestd = 2486 pwrb_context->pwrb_handle_basestd =
2356 kzalloc(sizeof(struct wrb_handle *) * 2487 kzalloc(sizeof(struct wrb_handle *) *
2357 phba->params.wrbs_per_cxn, GFP_KERNEL); 2488 phba->params.wrbs_per_cxn, GFP_KERNEL);
2358 if (!pwrb_context->pwrb_handle_basestd) { 2489 if (!pwrb_context->pwrb_handle_basestd) {
2359 shost_printk(KERN_ERR, phba->shost, 2490 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2360 "Mem Alloc Failed. Failing to load\n"); 2491 "BM_%d : Mem Alloc Failed. Failing to load\n");
2361 goto init_wrb_hndl_failed; 2492 goto init_wrb_hndl_failed;
2362 } 2493 }
2363 if (!num_cxn_wrbh) { 2494 if (!num_cxn_wrbh) {
@@ -2438,12 +2569,13 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2438 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2569 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2439 mem_descr += HWI_MEM_ASYNC_HEADER_BUF; 2570 mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2440 if (mem_descr->mem_array[0].virtual_address) { 2571 if (mem_descr->mem_array[0].virtual_address) {
2441 SE_DEBUG(DBG_LVL_8, 2572 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2442 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF" 2573 "BM_%d : hwi_init_async_pdu_ctx"
2443 "va=%p\n", mem_descr->mem_array[0].virtual_address); 2574 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n",
2575 mem_descr->mem_array[0].virtual_address);
2444 } else 2576 } else
2445 shost_printk(KERN_WARNING, phba->shost, 2577 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2446 "No Virtual address\n"); 2578 "BM_%d : No Virtual address\n");
2447 2579
2448 pasync_ctx->async_header.va_base = 2580 pasync_ctx->async_header.va_base =
2449 mem_descr->mem_array[0].virtual_address; 2581 mem_descr->mem_array[0].virtual_address;
@@ -2454,24 +2586,27 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2454 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2586 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2455 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2587 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2456 if (mem_descr->mem_array[0].virtual_address) { 2588 if (mem_descr->mem_array[0].virtual_address) {
2457 SE_DEBUG(DBG_LVL_8, 2589 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2458 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING" 2590 "BM_%d : hwi_init_async_pdu_ctx"
2459 "va=%p\n", mem_descr->mem_array[0].virtual_address); 2591 " HWI_MEM_ASYNC_HEADER_RING va=%p\n",
2592 mem_descr->mem_array[0].virtual_address);
2460 } else 2593 } else
2461 shost_printk(KERN_WARNING, phba->shost, 2594 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2462 "No Virtual address\n"); 2595 "BM_%d : No Virtual address\n");
2596
2463 pasync_ctx->async_header.ring_base = 2597 pasync_ctx->async_header.ring_base =
2464 mem_descr->mem_array[0].virtual_address; 2598 mem_descr->mem_array[0].virtual_address;
2465 2599
2466 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2600 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2467 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; 2601 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2468 if (mem_descr->mem_array[0].virtual_address) { 2602 if (mem_descr->mem_array[0].virtual_address) {
2469 SE_DEBUG(DBG_LVL_8, 2603 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2470 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE" 2604 "BM_%d : hwi_init_async_pdu_ctx"
2471 "va=%p\n", mem_descr->mem_array[0].virtual_address); 2605 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n",
2606 mem_descr->mem_array[0].virtual_address);
2472 } else 2607 } else
2473 shost_printk(KERN_WARNING, phba->shost, 2608 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2474 "No Virtual address\n"); 2609 "BM_%d : No Virtual address\n");
2475 2610
2476 pasync_ctx->async_header.handle_base = 2611 pasync_ctx->async_header.handle_base =
2477 mem_descr->mem_array[0].virtual_address; 2612 mem_descr->mem_array[0].virtual_address;
@@ -2482,12 +2617,13 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2482 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2617 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2483 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2618 mem_descr += HWI_MEM_ASYNC_DATA_RING;
2484 if (mem_descr->mem_array[0].virtual_address) { 2619 if (mem_descr->mem_array[0].virtual_address) {
2485 SE_DEBUG(DBG_LVL_8, 2620 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2486 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING" 2621 "BM_%d : hwi_init_async_pdu_ctx"
2487 "va=%p\n", mem_descr->mem_array[0].virtual_address); 2622 " HWI_MEM_ASYNC_DATA_RING va=%p\n",
2623 mem_descr->mem_array[0].virtual_address);
2488 } else 2624 } else
2489 shost_printk(KERN_WARNING, phba->shost, 2625 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2490 "No Virtual address\n"); 2626 "BM_%d : No Virtual address\n");
2491 2627
2492 pasync_ctx->async_data.ring_base = 2628 pasync_ctx->async_data.ring_base =
2493 mem_descr->mem_array[0].virtual_address; 2629 mem_descr->mem_array[0].virtual_address;
@@ -2495,8 +2631,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2495 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2631 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2496 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; 2632 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2497 if (!mem_descr->mem_array[0].virtual_address) 2633 if (!mem_descr->mem_array[0].virtual_address)
2498 shost_printk(KERN_WARNING, phba->shost, 2634 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2499 "No Virtual address\n"); 2635 "BM_%d : No Virtual address\n");
2500 2636
2501 pasync_ctx->async_data.handle_base = 2637 pasync_ctx->async_data.handle_base =
2502 mem_descr->mem_array[0].virtual_address; 2638 mem_descr->mem_array[0].virtual_address;
@@ -2511,12 +2647,14 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2511 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2647 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2512 mem_descr += HWI_MEM_ASYNC_DATA_BUF; 2648 mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2513 if (mem_descr->mem_array[0].virtual_address) { 2649 if (mem_descr->mem_array[0].virtual_address) {
2514 SE_DEBUG(DBG_LVL_8, 2650 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2515 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF" 2651 "BM_%d : hwi_init_async_pdu_ctx"
2516 "va=%p\n", mem_descr->mem_array[0].virtual_address); 2652 " HWI_MEM_ASYNC_DATA_BUF va=%p\n",
2653 mem_descr->mem_array[0].virtual_address);
2517 } else 2654 } else
2518 shost_printk(KERN_WARNING, phba->shost, 2655 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
2519 "No Virtual address\n"); 2656 "BM_%d : No Virtual address\n");
2657
2520 idx = 0; 2658 idx = 0;
2521 pasync_ctx->async_data.va_base = 2659 pasync_ctx->async_data.va_base =
2522 mem_descr->mem_array[idx].virtual_address; 2660 mem_descr->mem_array[idx].virtual_address;
@@ -2657,7 +2795,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2657 struct hwi_context_memory *phwi_context) 2795 struct hwi_context_memory *phwi_context)
2658{ 2796{
2659 unsigned int i, num_eq_pages; 2797 unsigned int i, num_eq_pages;
2660 int ret, eq_for_mcc; 2798 int ret = 0, eq_for_mcc;
2661 struct be_queue_info *eq; 2799 struct be_queue_info *eq;
2662 struct be_dma_mem *mem; 2800 struct be_dma_mem *mem;
2663 void *eq_vaddress; 2801 void *eq_vaddress;
@@ -2684,8 +2822,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2684 ret = be_fill_queue(eq, phba->params.num_eq_entries, 2822 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2685 sizeof(struct be_eq_entry), eq_vaddress); 2823 sizeof(struct be_eq_entry), eq_vaddress);
2686 if (ret) { 2824 if (ret) {
2687 shost_printk(KERN_ERR, phba->shost, 2825 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2688 "be_fill_queue Failed for EQ\n"); 2826 "BM_%d : be_fill_queue Failed for EQ\n");
2689 goto create_eq_error; 2827 goto create_eq_error;
2690 } 2828 }
2691 2829
@@ -2693,12 +2831,15 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2693 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 2831 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2694 phwi_context->cur_eqd); 2832 phwi_context->cur_eqd);
2695 if (ret) { 2833 if (ret) {
2696 shost_printk(KERN_ERR, phba->shost, 2834 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2697 "beiscsi_cmd_eq_create" 2835 "BM_%d : beiscsi_cmd_eq_create"
2698 "Failedfor EQ\n"); 2836 "Failed for EQ\n");
2699 goto create_eq_error; 2837 goto create_eq_error;
2700 } 2838 }
2701 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id); 2839
2840 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2841 "BM_%d : eqid = %d\n",
2842 phwi_context->be_eq[i].q.id);
2702 } 2843 }
2703 return 0; 2844 return 0;
2704create_eq_error: 2845create_eq_error:
@@ -2717,7 +2858,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2717 struct hwi_context_memory *phwi_context) 2858 struct hwi_context_memory *phwi_context)
2718{ 2859{
2719 unsigned int i, num_cq_pages; 2860 unsigned int i, num_cq_pages;
2720 int ret; 2861 int ret = 0;
2721 struct be_queue_info *cq, *eq; 2862 struct be_queue_info *cq, *eq;
2722 struct be_dma_mem *mem; 2863 struct be_dma_mem *mem;
2723 struct be_eq_obj *pbe_eq; 2864 struct be_eq_obj *pbe_eq;
@@ -2742,8 +2883,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2742 ret = be_fill_queue(cq, phba->params.num_cq_entries, 2883 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2743 sizeof(struct sol_cqe), cq_vaddress); 2884 sizeof(struct sol_cqe), cq_vaddress);
2744 if (ret) { 2885 if (ret) {
2745 shost_printk(KERN_ERR, phba->shost, 2886 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2746 "be_fill_queue Failed for ISCSI CQ\n"); 2887 "BM_%d : be_fill_queue Failed "
2888 "for ISCSI CQ\n");
2747 goto create_cq_error; 2889 goto create_cq_error;
2748 } 2890 }
2749 2891
@@ -2751,14 +2893,14 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2751 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 2893 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2752 false, 0); 2894 false, 0);
2753 if (ret) { 2895 if (ret) {
2754 shost_printk(KERN_ERR, phba->shost, 2896 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2755 "beiscsi_cmd_eq_create" 2897 "BM_%d : beiscsi_cmd_eq_create"
2756 "Failed for ISCSI CQ\n"); 2898 "Failed for ISCSI CQ\n");
2757 goto create_cq_error; 2899 goto create_cq_error;
2758 } 2900 }
2759 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n", 2901 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2760 cq->id, eq->id); 2902 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
2761 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n"); 2903 "iSCSI CQ CREATED\n", cq->id, eq->id);
2762 } 2904 }
2763 return 0; 2905 return 0;
2764 2906
@@ -2799,8 +2941,8 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2799 sizeof(struct phys_addr), 2941 sizeof(struct phys_addr),
2800 sizeof(struct phys_addr), dq_vaddress); 2942 sizeof(struct phys_addr), dq_vaddress);
2801 if (ret) { 2943 if (ret) {
2802 shost_printk(KERN_ERR, phba->shost, 2944 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2803 "be_fill_queue Failed for DEF PDU HDR\n"); 2945 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n");
2804 return ret; 2946 return ret;
2805 } 2947 }
2806 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 2948 mem->dma = (unsigned long)mem_descr->mem_array[idx].
@@ -2809,13 +2951,15 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2809 def_pdu_ring_sz, 2951 def_pdu_ring_sz,
2810 phba->params.defpdu_hdr_sz); 2952 phba->params.defpdu_hdr_sz);
2811 if (ret) { 2953 if (ret) {
2812 shost_printk(KERN_ERR, phba->shost, 2954 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2813 "be_cmd_create_default_pdu_queue Failed DEFHDR\n"); 2955 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2814 return ret; 2956 return ret;
2815 } 2957 }
2816 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; 2958 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2817 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n", 2959 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2818 phwi_context->be_def_hdrq.id); 2960 "BM_%d : iscsi def pdu id is %d\n",
2961 phwi_context->be_def_hdrq.id);
2962
2819 hwi_post_async_buffers(phba, 1); 2963 hwi_post_async_buffers(phba, 1);
2820 return 0; 2964 return 0;
2821} 2965}
@@ -2844,8 +2988,8 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
2844 sizeof(struct phys_addr), 2988 sizeof(struct phys_addr),
2845 sizeof(struct phys_addr), dq_vaddress); 2989 sizeof(struct phys_addr), dq_vaddress);
2846 if (ret) { 2990 if (ret) {
2847 shost_printk(KERN_ERR, phba->shost, 2991 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2848 "be_fill_queue Failed for DEF PDU DATA\n"); 2992 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
2849 return ret; 2993 return ret;
2850 } 2994 }
2851 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 2995 mem->dma = (unsigned long)mem_descr->mem_array[idx].
@@ -2854,16 +2998,20 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
2854 def_pdu_ring_sz, 2998 def_pdu_ring_sz,
2855 phba->params.defpdu_data_sz); 2999 phba->params.defpdu_data_sz);
2856 if (ret) { 3000 if (ret) {
2857 shost_printk(KERN_ERR, phba->shost, 3001 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2858 "be_cmd_create_default_pdu_queue Failed" 3002 "BM_%d be_cmd_create_default_pdu_queue"
2859 " for DEF PDU DATA\n"); 3003 " Failed for DEF PDU DATA\n");
2860 return ret; 3004 return ret;
2861 } 3005 }
2862 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; 3006 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2863 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n", 3007 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2864 phwi_context->be_def_dataq.id); 3008 "BM_%d : iscsi def data id is %d\n",
3009 phwi_context->be_def_dataq.id);
3010
2865 hwi_post_async_buffers(phba, 0); 3011 hwi_post_async_buffers(phba, 0);
2866 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n"); 3012 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3013 "BM_%d : DEFAULT PDU DATA RING CREATED\n");
3014
2867 return 0; 3015 return 0;
2868} 3016}
2869 3017
@@ -2889,13 +3037,14 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
2889 (pm_arr->size / PAGE_SIZE)); 3037 (pm_arr->size / PAGE_SIZE));
2890 page_offset += pm_arr->size / PAGE_SIZE; 3038 page_offset += pm_arr->size / PAGE_SIZE;
2891 if (status != 0) { 3039 if (status != 0) {
2892 shost_printk(KERN_ERR, phba->shost, 3040 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2893 "post sgl failed.\n"); 3041 "BM_%d : post sgl failed.\n");
2894 return status; 3042 return status;
2895 } 3043 }
2896 pm_arr++; 3044 pm_arr++;
2897 } 3045 }
2898 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n"); 3046 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3047 "BM_%d : POSTED PAGES\n");
2899 return 0; 3048 return 0;
2900} 3049}
2901 3050
@@ -2945,8 +3094,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2945 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3094 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2946 GFP_KERNEL); 3095 GFP_KERNEL);
2947 if (!pwrb_arr) { 3096 if (!pwrb_arr) {
2948 shost_printk(KERN_ERR, phba->shost, 3097 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2949 "Memory alloc failed in create wrb ring.\n"); 3098 "BM_%d : Memory alloc failed in create wrb ring.\n");
2950 return -ENOMEM; 3099 return -ENOMEM;
2951 } 3100 }
2952 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3101 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
@@ -2990,8 +3139,8 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2990 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3139 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2991 &phwi_context->be_wrbq[i]); 3140 &phwi_context->be_wrbq[i]);
2992 if (status != 0) { 3141 if (status != 0) {
2993 shost_printk(KERN_ERR, phba->shost, 3142 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2994 "wrbq create failed."); 3143 "BM_%d : wrbq create failed.");
2995 kfree(pwrb_arr); 3144 kfree(pwrb_arr);
2996 return status; 3145 return status;
2997 } 3146 }
@@ -3127,7 +3276,6 @@ static int find_num_cpus(void)
3127 if (num_cpus >= MAX_CPUS) 3276 if (num_cpus >= MAX_CPUS)
3128 num_cpus = MAX_CPUS - 1; 3277 num_cpus = MAX_CPUS - 1;
3129 3278
3130 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
3131 return num_cpus; 3279 return num_cpus;
3132} 3280}
3133 3281
@@ -3150,7 +3298,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3150 3298
3151 status = beiscsi_create_eqs(phba, phwi_context); 3299 status = beiscsi_create_eqs(phba, phwi_context);
3152 if (status != 0) { 3300 if (status != 0) {
3153 shost_printk(KERN_ERR, phba->shost, "EQ not created\n"); 3301 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3302 "BM_%d : EQ not created\n");
3154 goto error; 3303 goto error;
3155 } 3304 }
3156 3305
@@ -3160,51 +3309,55 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3160 3309
3161 status = mgmt_check_supported_fw(ctrl, phba); 3310 status = mgmt_check_supported_fw(ctrl, phba);
3162 if (status != 0) { 3311 if (status != 0) {
3163 shost_printk(KERN_ERR, phba->shost, 3312 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3164 "Unsupported fw version\n"); 3313 "BM_%d : Unsupported fw version\n");
3165 goto error; 3314 goto error;
3166 } 3315 }
3167 3316
3168 status = beiscsi_create_cqs(phba, phwi_context); 3317 status = beiscsi_create_cqs(phba, phwi_context);
3169 if (status != 0) { 3318 if (status != 0) {
3170 shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); 3319 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3320 "BM_%d : CQ not created\n");
3171 goto error; 3321 goto error;
3172 } 3322 }
3173 3323
3174 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, 3324 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
3175 def_pdu_ring_sz); 3325 def_pdu_ring_sz);
3176 if (status != 0) { 3326 if (status != 0) {
3177 shost_printk(KERN_ERR, phba->shost, 3327 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3178 "Default Header not created\n"); 3328 "BM_%d : Default Header not created\n");
3179 goto error; 3329 goto error;
3180 } 3330 }
3181 3331
3182 status = beiscsi_create_def_data(phba, phwi_context, 3332 status = beiscsi_create_def_data(phba, phwi_context,
3183 phwi_ctrlr, def_pdu_ring_sz); 3333 phwi_ctrlr, def_pdu_ring_sz);
3184 if (status != 0) { 3334 if (status != 0) {
3185 shost_printk(KERN_ERR, phba->shost, 3335 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3186 "Default Data not created\n"); 3336 "BM_%d : Default Data not created\n");
3187 goto error; 3337 goto error;
3188 } 3338 }
3189 3339
3190 status = beiscsi_post_pages(phba); 3340 status = beiscsi_post_pages(phba);
3191 if (status != 0) { 3341 if (status != 0) {
3192 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n"); 3342 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3343 "BM_%d : Post SGL Pages Failed\n");
3193 goto error; 3344 goto error;
3194 } 3345 }
3195 3346
3196 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3347 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3197 if (status != 0) { 3348 if (status != 0) {
3198 shost_printk(KERN_ERR, phba->shost, 3349 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3199 "WRB Rings not created\n"); 3350 "BM_%d : WRB Rings not created\n");
3200 goto error; 3351 goto error;
3201 } 3352 }
3202 3353
3203 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n"); 3354 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3355 "BM_%d : hwi_init_port success\n");
3204 return 0; 3356 return 0;
3205 3357
3206error: 3358error:
3207 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed"); 3359 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3360 "BM_%d : hwi_init_port failed");
3208 hwi_cleanup(phba); 3361 hwi_cleanup(phba);
3209 return status; 3362 return status;
3210} 3363}
@@ -3217,12 +3370,13 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
3217 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3370 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3218 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3371 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3219 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3372 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3220 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n", 3373 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3221 phwi_ctrlr->phwi_ctxt); 3374 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3375 phwi_ctrlr->phwi_ctxt);
3222 } else { 3376 } else {
3223 shost_printk(KERN_ERR, phba->shost, 3377 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3224 "HWI_MEM_ADDN_CONTEXT is more than one element." 3378 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3225 "Failing to load\n"); 3379 "than one element.Failing to load\n");
3226 return -ENOMEM; 3380 return -ENOMEM;
3227 } 3381 }
3228 3382
@@ -3232,8 +3386,9 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
3232 3386
3233 hwi_init_async_pdu_ctx(phba); 3387 hwi_init_async_pdu_ctx(phba);
3234 if (hwi_init_port(phba) != 0) { 3388 if (hwi_init_port(phba) != 0) {
3235 shost_printk(KERN_ERR, phba->shost, 3389 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3236 "hwi_init_controller failed\n"); 3390 "BM_%d : hwi_init_controller failed\n");
3391
3237 return -ENOMEM; 3392 return -ENOMEM;
3238 } 3393 }
3239 return 0; 3394 return 0;
@@ -3268,15 +3423,18 @@ static int beiscsi_init_controller(struct beiscsi_hba *phba)
3268 3423
3269 ret = beiscsi_get_memory(phba); 3424 ret = beiscsi_get_memory(phba);
3270 if (ret < 0) { 3425 if (ret < 0) {
3271 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -" 3426 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3272 "Failed in beiscsi_alloc_memory\n"); 3427 "BM_%d : beiscsi_dev_probe -"
3428 "Failed in beiscsi_alloc_memory\n");
3273 return ret; 3429 return ret;
3274 } 3430 }
3275 3431
3276 ret = hwi_init_controller(phba); 3432 ret = hwi_init_controller(phba);
3277 if (ret) 3433 if (ret)
3278 goto free_init; 3434 goto free_init;
3279 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller"); 3435 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3436 "BM_%d : Return success from beiscsi_init_controller");
3437
3280 return 0; 3438 return 0;
3281 3439
3282free_init: 3440free_init:
@@ -3301,8 +3459,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3301 phba->params.ios_per_ctrl, 3459 phba->params.ios_per_ctrl,
3302 GFP_KERNEL); 3460 GFP_KERNEL);
3303 if (!phba->io_sgl_hndl_base) { 3461 if (!phba->io_sgl_hndl_base) {
3304 shost_printk(KERN_ERR, phba->shost, 3462 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3305 "Mem Alloc Failed. Failing to load\n"); 3463 "BM_%d : Mem Alloc Failed. Failing to load\n");
3306 return -ENOMEM; 3464 return -ENOMEM;
3307 } 3465 }
3308 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3466 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
@@ -3311,14 +3469,14 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3311 GFP_KERNEL); 3469 GFP_KERNEL);
3312 if (!phba->eh_sgl_hndl_base) { 3470 if (!phba->eh_sgl_hndl_base) {
3313 kfree(phba->io_sgl_hndl_base); 3471 kfree(phba->io_sgl_hndl_base);
3314 shost_printk(KERN_ERR, phba->shost, 3472 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3315 "Mem Alloc Failed. Failing to load\n"); 3473 "BM_%d : Mem Alloc Failed. Failing to load\n");
3316 return -ENOMEM; 3474 return -ENOMEM;
3317 } 3475 }
3318 } else { 3476 } else {
3319 shost_printk(KERN_ERR, phba->shost, 3477 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3320 "HWI_MEM_SGLH is more than one element." 3478 "BM_%d : HWI_MEM_SGLH is more than one element."
3321 "Failing to load\n"); 3479 "Failing to load\n");
3322 return -ENOMEM; 3480 return -ENOMEM;
3323 } 3481 }
3324 3482
@@ -3344,15 +3502,18 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3344 } 3502 }
3345 idx++; 3503 idx++;
3346 } 3504 }
3347 SE_DEBUG(DBG_LVL_8, 3505 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3348 "phba->io_sgl_hndl_avbl=%d" 3506 "BM_%d : phba->io_sgl_hndl_avbl=%d"
3349 "phba->eh_sgl_hndl_avbl=%d\n", 3507 "phba->eh_sgl_hndl_avbl=%d\n",
3350 phba->io_sgl_hndl_avbl, 3508 phba->io_sgl_hndl_avbl,
3351 phba->eh_sgl_hndl_avbl); 3509 phba->eh_sgl_hndl_avbl);
3510
3352 mem_descr_sg = phba->init_mem; 3511 mem_descr_sg = phba->init_mem;
3353 mem_descr_sg += HWI_MEM_SGE; 3512 mem_descr_sg += HWI_MEM_SGE;
3354 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n", 3513 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3355 mem_descr_sg->num_elements); 3514 "BM_%d : mem_descr_sg->num_elements=%d\n",
3515 mem_descr_sg->num_elements);
3516
3356 arr_index = 0; 3517 arr_index = 0;
3357 idx = 0; 3518 idx = 0;
3358 while (idx < mem_descr_sg->num_elements) { 3519 while (idx < mem_descr_sg->num_elements) {
@@ -3390,17 +3551,17 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3390 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3551 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3391 GFP_KERNEL); 3552 GFP_KERNEL);
3392 if (!phba->cid_array) { 3553 if (!phba->cid_array) {
3393 shost_printk(KERN_ERR, phba->shost, 3554 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3394 "Failed to allocate memory in " 3555 "BM_%d : Failed to allocate memory in "
3395 "hba_setup_cid_tbls\n"); 3556 "hba_setup_cid_tbls\n");
3396 return -ENOMEM; 3557 return -ENOMEM;
3397 } 3558 }
3398 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 3559 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3399 phba->params.cxns_per_ctrl * 2, GFP_KERNEL); 3560 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3400 if (!phba->ep_array) { 3561 if (!phba->ep_array) {
3401 shost_printk(KERN_ERR, phba->shost, 3562 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3402 "Failed to allocate memory in " 3563 "BM_%d : Failed to allocate memory in "
3403 "hba_setup_cid_tbls\n"); 3564 "hba_setup_cid_tbls\n");
3404 kfree(phba->cid_array); 3565 kfree(phba->cid_array);
3405 return -ENOMEM; 3566 return -ENOMEM;
3406 } 3567 }
@@ -3433,18 +3594,22 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
3433 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3594 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3434 if (!enabled) { 3595 if (!enabled) {
3435 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3596 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3436 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr); 3597 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3598 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
3437 iowrite32(reg, addr); 3599 iowrite32(reg, addr);
3438 } 3600 }
3439 3601
3440 if (!phba->msix_enabled) { 3602 if (!phba->msix_enabled) {
3441 eq = &phwi_context->be_eq[0].q; 3603 eq = &phwi_context->be_eq[0].q;
3442 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); 3604 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3605 "BM_%d : eq->id=%d\n", eq->id);
3606
3443 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3607 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3444 } else { 3608 } else {
3445 for (i = 0; i <= phba->num_cpus; i++) { 3609 for (i = 0; i <= phba->num_cpus; i++) {
3446 eq = &phwi_context->be_eq[i].q; 3610 eq = &phwi_context->be_eq[i].q;
3447 SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id); 3611 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3612 "BM_%d : eq->id=%d\n", eq->id);
3448 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3613 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3449 } 3614 }
3450 } 3615 }
@@ -3462,64 +3627,60 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
3462 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3627 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3463 iowrite32(reg, addr); 3628 iowrite32(reg, addr);
3464 } else 3629 } else
3465 shost_printk(KERN_WARNING, phba->shost, 3630 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3466 "In hwi_disable_intr, Already Disabled\n"); 3631 "BM_%d : In hwi_disable_intr, Already Disabled\n");
3467} 3632}
3468 3633
3634/**
3635 * beiscsi_get_boot_info()- Get the boot session info
3636 * @phba: The device priv structure instance
3637 *
3638 * Get the boot target info and store in driver priv structure
3639 *
3640 * return values
3641 * Success: 0
3642 * Failure: Non-Zero Value
3643 **/
3469static int beiscsi_get_boot_info(struct beiscsi_hba *phba) 3644static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3470{ 3645{
3471 struct be_cmd_get_boot_target_resp *boot_resp;
3472 struct be_cmd_get_session_resp *session_resp; 3646 struct be_cmd_get_session_resp *session_resp;
3473 struct be_mcc_wrb *wrb; 3647 struct be_mcc_wrb *wrb;
3474 struct be_dma_mem nonemb_cmd; 3648 struct be_dma_mem nonemb_cmd;
3475 unsigned int tag, wrb_num; 3649 unsigned int tag, wrb_num;
3476 unsigned short status, extd_status; 3650 unsigned short status, extd_status;
3651 unsigned int s_handle;
3477 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; 3652 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3478 int ret = -ENOMEM; 3653 int ret = -ENOMEM;
3479 3654
3480 tag = mgmt_get_boot_target(phba); 3655 /* Get the session handle of the boot target */
3481 if (!tag) { 3656 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
3482 SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n"); 3657 if (ret) {
3483 return -EAGAIN; 3658 beiscsi_log(phba, KERN_ERR,
3484 } else 3659 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3485 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 3660 "BM_%d : No boot session\n");
3486 phba->ctrl.mcc_numtag[tag]); 3661 return ret;
3487
3488 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3489 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3490 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3491 if (status || extd_status) {
3492 SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
3493 " status = %d extd_status = %d\n",
3494 status, extd_status);
3495 free_mcc_tag(&phba->ctrl, tag);
3496 return -EBUSY;
3497 }
3498 wrb = queue_get_wrb(mccq, wrb_num);
3499 free_mcc_tag(&phba->ctrl, tag);
3500 boot_resp = embedded_payload(wrb);
3501
3502 if (boot_resp->boot_session_handle < 0) {
3503 shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
3504 return -ENXIO;
3505 } 3662 }
3506
3507 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 3663 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3508 sizeof(*session_resp), 3664 sizeof(*session_resp),
3509 &nonemb_cmd.dma); 3665 &nonemb_cmd.dma);
3510 if (nonemb_cmd.va == NULL) { 3666 if (nonemb_cmd.va == NULL) {
3511 SE_DEBUG(DBG_LVL_1, 3667 beiscsi_log(phba, KERN_ERR,
3512 "Failed to allocate memory for" 3668 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3513 "beiscsi_get_session_info\n"); 3669 "BM_%d : Failed to allocate memory for"
3670 "beiscsi_get_session_info\n");
3671
3514 return -ENOMEM; 3672 return -ENOMEM;
3515 } 3673 }
3516 3674
3517 memset(nonemb_cmd.va, 0, sizeof(*session_resp)); 3675 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3518 tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle, 3676 tag = mgmt_get_session_info(phba, s_handle,
3519 &nonemb_cmd); 3677 &nonemb_cmd);
3520 if (!tag) { 3678 if (!tag) {
3521 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info" 3679 beiscsi_log(phba, KERN_ERR,
3522 " Failed\n"); 3680 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3681 "BM_%d : beiscsi_get_session_info"
3682 " Failed\n");
3683
3523 goto boot_freemem; 3684 goto boot_freemem;
3524 } else 3685 } else
3525 wait_event_interruptible(phba->ctrl.mcc_wait[tag], 3686 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -3529,9 +3690,12 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3529 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 3690 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3530 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 3691 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3531 if (status || extd_status) { 3692 if (status || extd_status) {
3532 SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed" 3693 beiscsi_log(phba, KERN_ERR,
3533 " status = %d extd_status = %d\n", 3694 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
3534 status, extd_status); 3695 "BM_%d : beiscsi_get_session_info Failed"
3696 " status = %d extd_status = %d\n",
3697 status, extd_status);
3698
3535 free_mcc_tag(&phba->ctrl, tag); 3699 free_mcc_tag(&phba->ctrl, tag);
3536 goto boot_freemem; 3700 goto boot_freemem;
3537 } 3701 }
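
The status checks repeated throughout this patch all decode the same packed
32-bit completion word that the MCC interrupt path stores in
phba->ctrl.mcc_numtag[tag]: bits 23:16 carry the WRB index, bits 15:8 the
extended status, and bits 7:0 the completion status. A minimal helper sketch
of that layout, with the type and function names (mcc_compl_word,
decode_mcc_word) invented here purely for illustration:

	/* Sketch only: field layout taken from the shifts and masks
	 * used in the surrounding code.
	 */
	struct mcc_compl_word {
		unsigned int wrb_num;		/* bits 23:16 - WRB index */
		unsigned short extd_status;	/* bits 15:8  - extended status */
		unsigned short status;		/* bits 7:0   - completion status */
	};

	static inline struct mcc_compl_word decode_mcc_word(unsigned int numtag)
	{
		struct mcc_compl_word w;

		w.wrb_num = (numtag & 0x00FF0000) >> 16;
		w.extd_status = (numtag & 0x0000FF00) >> 8;
		w.status = numtag & 0x000000FF;
		return w;
	}
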
@@ -3611,22 +3775,22 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
3611 3775
3612 ret = beiscsi_init_controller(phba); 3776 ret = beiscsi_init_controller(phba);
3613 if (ret < 0) { 3777 if (ret < 0) {
3614 shost_printk(KERN_ERR, phba->shost, 3778 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3615 "beiscsi_dev_probe - Failed in" 3779 "BM_%d : beiscsi_dev_probe - Failed in"
3616 "beiscsi_init_controller\n"); 3780 "beiscsi_init_controller\n");
3617 return ret; 3781 return ret;
3618 } 3782 }
3619 ret = beiscsi_init_sgl_handle(phba); 3783 ret = beiscsi_init_sgl_handle(phba);
3620 if (ret < 0) { 3784 if (ret < 0) {
3621 shost_printk(KERN_ERR, phba->shost, 3785 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3622 "beiscsi_dev_probe - Failed in" 3786 "BM_%d : beiscsi_dev_probe - Failed in"
3623 "beiscsi_init_sgl_handle\n"); 3787 "beiscsi_init_sgl_handle\n");
3624 goto do_cleanup_ctrlr; 3788 goto do_cleanup_ctrlr;
3625 } 3789 }
3626 3790
3627 if (hba_setup_cid_tbls(phba)) { 3791 if (hba_setup_cid_tbls(phba)) {
3628 shost_printk(KERN_ERR, phba->shost, 3792 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3629 "Failed in hba_setup_cid_tbls\n"); 3793 "BM_%d : Failed in hba_setup_cid_tbls\n");
3630 kfree(phba->io_sgl_hndl_base); 3794 kfree(phba->io_sgl_hndl_base);
3631 kfree(phba->eh_sgl_hndl_base); 3795 kfree(phba->eh_sgl_hndl_base);
3632 goto do_cleanup_ctrlr; 3796 goto do_cleanup_ctrlr;
@@ -3678,8 +3842,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
3678 3842
3679 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); 3843 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3680 if (mgmt_status) 3844 if (mgmt_status)
3681 shost_printk(KERN_WARNING, phba->shost, 3845 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
3682 "mgmt_epfw_cleanup FAILED\n"); 3846 "BM_%d : mgmt_epfw_cleanup FAILED\n");
3683 3847
3684 hwi_purge_eq(phba); 3848 hwi_purge_eq(phba);
3685 hwi_cleanup(phba); 3849 hwi_cleanup(phba);
@@ -3960,7 +4124,9 @@ free_hndls:
3960 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4124 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3961 io_task->bhs_pa.u.a64.address); 4125 io_task->bhs_pa.u.a64.address);
3962 io_task->cmd_bhs = NULL; 4126 io_task->cmd_bhs = NULL;
3963 SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n"); 4127 beiscsi_log(phba, KERN_ERR,
4128 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4129 "BM_%d : Alloc of SGL_ICD Failed\n");
3964 return -ENOMEM; 4130 return -ENOMEM;
3965} 4131}
3966 4132
@@ -3981,15 +4147,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3981 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4147 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3982 4148
3983 if (writedir) { 4149 if (writedir) {
3984 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3985 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3986 &io_task->cmd_bhs->iscsi_data_pdu,
3987 (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3988 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3989 &io_task->cmd_bhs->iscsi_data_pdu,
3990 ISCSI_OPCODE_SCSI_DATA_OUT);
3991 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3992 &io_task->cmd_bhs->iscsi_data_pdu, 1);
3993 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4150 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3994 INI_WR_CMD); 4151 INI_WR_CMD);
3995 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4152 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
@@ -3998,9 +4155,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3998 INI_RD_CMD); 4155 INI_RD_CMD);
3999 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4156 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4000 } 4157 }
4001 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
4002 dw[offsetof(struct amap_pdu_data_out, lun) / 32],
4003 &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
4004 4158
4005 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4159 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4006 cpu_to_be16(*(unsigned short *) 4160 cpu_to_be16(*(unsigned short *)
@@ -4090,8 +4244,10 @@ static int beiscsi_mtask(struct iscsi_task *task)
4090 break; 4244 break;
4091 4245
4092 default: 4246 default:
4093 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n", 4247 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4094 task->hdr->opcode & ISCSI_OPCODE_MASK); 4248 "BM_%d : opcode = %d Not supported\n",
4249 task->hdr->opcode & ISCSI_OPCODE_MASK);
4250
4095 return -EINVAL; 4251 return -EINVAL;
4096 } 4252 }
4097 4253
@@ -4123,17 +4279,22 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
4123 io_task->scsi_cmnd = sc; 4279 io_task->scsi_cmnd = sc;
4124 num_sg = scsi_dma_map(sc); 4280 num_sg = scsi_dma_map(sc);
4125 if (num_sg < 0) { 4281 if (num_sg < 0) {
4126 SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n") 4282 struct iscsi_conn *conn = task->conn;
4283 struct beiscsi_hba *phba = NULL;
4284
4285 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
4286 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
4287 "BM_%d : scsi_dma_map Failed\n");
4288
4127 return num_sg; 4289 return num_sg;
4128 } 4290 }
4129 xferlen = scsi_bufflen(sc); 4291 xferlen = scsi_bufflen(sc);
4130 sg = scsi_sglist(sc); 4292 sg = scsi_sglist(sc);
4131 if (sc->sc_data_direction == DMA_TO_DEVICE) { 4293 if (sc->sc_data_direction == DMA_TO_DEVICE)
4132 writedir = 1; 4294 writedir = 1;
4133 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n", 4295 else
4134 task->imm_count);
4135 } else
4136 writedir = 0; 4296 writedir = 0;
4297
4137 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); 4298 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4138} 4299}
4139 4300
@@ -4162,14 +4323,17 @@ static int beiscsi_bsg_request(struct bsg_job *job)
4162 job->request_payload.payload_len, 4323 job->request_payload.payload_len,
4163 &nonemb_cmd.dma); 4324 &nonemb_cmd.dma);
4164 if (nonemb_cmd.va == NULL) { 4325 if (nonemb_cmd.va == NULL) {
4165 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for " 4326 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4166 "beiscsi_bsg_request\n"); 4327 "BM_%d : Failed to allocate memory for "
4328 "beiscsi_bsg_request\n");
4167 return -EIO; 4329 return -EIO;
4168 } 4330 }
4169 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4331 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4170 &nonemb_cmd); 4332 &nonemb_cmd);
4171 if (!tag) { 4333 if (!tag) {
4172 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n"); 4334 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4335 "BM_%d : be_cmd_get_mac_addr Failed\n");
4336
4173 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4337 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4174 nonemb_cmd.va, nonemb_cmd.dma); 4338 nonemb_cmd.va, nonemb_cmd.dma);
4175 return -EAGAIN; 4339 return -EAGAIN;
@@ -4191,22 +4355,31 @@ static int beiscsi_bsg_request(struct bsg_job *job)
4191 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4355 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4192 nonemb_cmd.va, nonemb_cmd.dma); 4356 nonemb_cmd.va, nonemb_cmd.dma);
4193 if (status || extd_status) { 4357 if (status || extd_status) {
4194 SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed" 4358 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4195 " status = %d extd_status = %d\n", 4359 "BM_%d : be_cmd_get_mac_addr Failed"
4196 status, extd_status); 4360 " status = %d extd_status = %d\n",
4361 status, extd_status);
4362
4197 return -EIO; 4363 return -EIO;
4198 } 4364 }
4199 break; 4365 break;
4200 4366
4201 default: 4367 default:
4202 SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n", 4368 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4203 bsg_req->msgcode); 4369 "BM_%d : Unsupported bsg command: 0x%x\n",
4370 bsg_req->msgcode);
4204 break; 4371 break;
4205 } 4372 }
4206 4373
4207 return rc; 4374 return rc;
4208} 4375}
4209 4376
4377void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4378{
4379 /* Set the logging parameter */
4380 beiscsi_log_enable_init(phba, beiscsi_log_enable);
4381}
4382
4210static void beiscsi_quiesce(struct beiscsi_hba *phba) 4383static void beiscsi_quiesce(struct beiscsi_hba *phba)
4211{ 4384{
4212 struct hwi_controller *phwi_ctrlr; 4385 struct hwi_controller *phwi_ctrlr;
@@ -4316,18 +4489,21 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4316 4489
4317 ret = beiscsi_enable_pci(pcidev); 4490 ret = beiscsi_enable_pci(pcidev);
4318 if (ret < 0) { 4491 if (ret < 0) {
4319 dev_err(&pcidev->dev, "beiscsi_dev_probe-" 4492 dev_err(&pcidev->dev,
4320 " Failed to enable pci device\n"); 4493 "beiscsi_dev_probe - Failed to enable pci device\n");
4321 return ret; 4494 return ret;
4322 } 4495 }
4323 4496
4324 phba = beiscsi_hba_alloc(pcidev); 4497 phba = beiscsi_hba_alloc(pcidev);
4325 if (!phba) { 4498 if (!phba) {
4326 dev_err(&pcidev->dev, "beiscsi_dev_probe-" 4499 dev_err(&pcidev->dev,
4327 " Failed in beiscsi_hba_alloc\n"); 4500 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
4328 goto disable_pci; 4501 goto disable_pci;
4329 } 4502 }
4330 4503
4504 /* Initialize driver configuration parameters */
4505 beiscsi_hba_attrs_init(phba);
4506
4331 switch (pcidev->device) { 4507 switch (pcidev->device) {
4332 case BE_DEVICE_ID1: 4508 case BE_DEVICE_ID1:
4333 case OC_DEVICE_ID1: 4509 case OC_DEVICE_ID1:
@@ -4347,7 +4523,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4347 else 4523 else
4348 num_cpus = 1; 4524 num_cpus = 1;
4349 phba->num_cpus = num_cpus; 4525 phba->num_cpus = num_cpus;
4350 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus); 4526 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4527 "BM_%d : num_cpus = %d\n",
4528 phba->num_cpus);
4351 4529
4352 if (enable_msix) { 4530 if (enable_msix) {
4353 beiscsi_msix_enable(phba); 4531 beiscsi_msix_enable(phba);
@@ -4356,8 +4534,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4356 } 4534 }
4357 ret = be_ctrl_init(phba, pcidev); 4535 ret = be_ctrl_init(phba, pcidev);
4358 if (ret) { 4536 if (ret) {
4359 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4537 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4360 "Failed in be_ctrl_init\n"); 4538 "BM_%d : beiscsi_dev_probe-"
4539 "Failed in be_ctrl_init\n");
4361 goto hba_free; 4540 goto hba_free;
4362 } 4541 }
4363 4542
@@ -4366,19 +4545,19 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4366 value = readl((void *)real_offset); 4545 value = readl((void *)real_offset);
4367 if (value & 0x00010000) { 4546 if (value & 0x00010000) {
4368 gcrashmode++; 4547 gcrashmode++;
4369 shost_printk(KERN_ERR, phba->shost, 4548 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4370 "Loading Driver in crashdump mode\n"); 4549 "BM_%d : Loading Driver in crashdump mode\n");
4371 ret = beiscsi_cmd_reset_function(phba); 4550 ret = beiscsi_cmd_reset_function(phba);
4372 if (ret) { 4551 if (ret) {
4373 shost_printk(KERN_ERR, phba->shost, 4552 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4374 "Reset Failed. Aborting Crashdump\n"); 4553 "BM_%d : Reset Failed. Aborting Crashdump\n");
4375 goto hba_free; 4554 goto hba_free;
4376 } 4555 }
4377 ret = be_chk_reset_complete(phba); 4556 ret = be_chk_reset_complete(phba);
4378 if (ret) { 4557 if (ret) {
4379 shost_printk(KERN_ERR, phba->shost, 4558 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4380 "Failed to get out of reset." 4559 "BM_%d : Failed to get out of reset."
4381 "Aborting Crashdump\n"); 4560 "Aborting Crashdump\n");
4382 goto hba_free; 4561 goto hba_free;
4383 } 4562 }
4384 } else { 4563 } else {
@@ -4393,8 +4572,8 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4393 spin_lock_init(&phba->isr_lock); 4572 spin_lock_init(&phba->isr_lock);
4394 ret = mgmt_get_fw_config(&phba->ctrl, phba); 4573 ret = mgmt_get_fw_config(&phba->ctrl, phba);
4395 if (ret != 0) { 4574 if (ret != 0) {
4396 shost_printk(KERN_ERR, phba->shost, 4575 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4397 "Error getting fw config\n"); 4576 "BM_%d : Error getting fw config\n");
4398 goto free_port; 4577 goto free_port;
4399 } 4578 }
4400 phba->shost->max_id = phba->fw_config.iscsi_cid_count; 4579 phba->shost->max_id = phba->fw_config.iscsi_cid_count;
@@ -4402,8 +4581,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4402 phba->shost->can_queue = phba->params.ios_per_ctrl; 4581 phba->shost->can_queue = phba->params.ios_per_ctrl;
4403 ret = beiscsi_init_port(phba); 4582 ret = beiscsi_init_port(phba);
4404 if (ret < 0) { 4583 if (ret < 0) {
4405 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4584 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4406 "Failed in beiscsi_init_port\n"); 4585 "BM_%d : beiscsi_dev_probe-"
4586 "Failed in beiscsi_init_port\n");
4407 goto free_port; 4587 goto free_port;
4408 } 4588 }
4409 4589
@@ -4420,8 +4600,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4420 phba->shost->host_no); 4600 phba->shost->host_no);
4421 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); 4601 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
4422 if (!phba->wq) { 4602 if (!phba->wq) {
4423 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4603 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4424 "Failed to allocate work queue\n"); 4604 "BM_%d : beiscsi_dev_probe-"
4605 "Failed to allocate work queue\n");
4425 goto free_twq; 4606 goto free_twq;
4426 } 4607 }
4427 4608
@@ -4439,8 +4620,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4439 } 4620 }
4440 ret = beiscsi_init_irqs(phba); 4621 ret = beiscsi_init_irqs(phba);
4441 if (ret < 0) { 4622 if (ret < 0) {
4442 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4623 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4443 "Failed to beiscsi_init_irqs\n"); 4624 "BM_%d : beiscsi_dev_probe-"
4625 "Failed to beiscsi_init_irqs\n");
4444 goto free_blkenbld; 4626 goto free_blkenbld;
4445 } 4627 }
4446 hwi_enable_intr(phba); 4628 hwi_enable_intr(phba);
@@ -4450,11 +4632,13 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4450 * log error but continue, because we may not be using 4632 * log error but continue, because we may not be using
4451 * iscsi boot. 4633 * iscsi boot.
4452 */ 4634 */
4453 shost_printk(KERN_ERR, phba->shost, "Could not set up " 4635 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4454 "iSCSI boot info.\n"); 4636 "BM_%d : Could not set up "
4637 "iSCSI boot info.\n");
4455 4638
4456 beiscsi_create_def_ifaces(phba); 4639 beiscsi_create_def_ifaces(phba);
4457 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n"); 4640 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4641 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
4458 return 0; 4642 return 0;
4459 4643
4460free_blkenbld: 4644free_blkenbld:
@@ -4542,19 +4726,17 @@ static int __init beiscsi_module_init(void)
4542 beiscsi_scsi_transport = 4726 beiscsi_scsi_transport =
4543 iscsi_register_transport(&beiscsi_iscsi_transport); 4727 iscsi_register_transport(&beiscsi_iscsi_transport);
4544 if (!beiscsi_scsi_transport) { 4728 if (!beiscsi_scsi_transport) {
4545 SE_DEBUG(DBG_LVL_1, 4729 printk(KERN_ERR
4546 "beiscsi_module_init - Unable to register beiscsi" 4730 "beiscsi_module_init - Unable to register beiscsi transport.\n");
4547 "transport.\n");
4548 return -ENOMEM; 4731 return -ENOMEM;
4549 } 4732 }
4550 SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n", 4733 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
4551 &beiscsi_iscsi_transport); 4734 &beiscsi_iscsi_transport);
4552 4735
4553 ret = pci_register_driver(&beiscsi_pci_driver); 4736 ret = pci_register_driver(&beiscsi_pci_driver);
4554 if (ret) { 4737 if (ret) {
4555 SE_DEBUG(DBG_LVL_1, 4738 printk(KERN_ERR
4556 "beiscsi_module_init - Unable to register" 4739 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
4557 "beiscsi pci driver.\n");
4558 goto unregister_iscsi_transport; 4740 goto unregister_iscsi_transport;
4559 } 4741 }
4560 return 0; 4742 return 0;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 40fea6ec879..b8912263ef4 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -24,6 +24,8 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/if_ether.h> 25#include <linux/if_ether.h>
26#include <linux/in.h> 26#include <linux/in.h>
27#include <linux/ctype.h>
28#include <linux/module.h>
27#include <scsi/scsi.h> 29#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h> 30#include <scsi/scsi_cmnd.h>
29#include <scsi/scsi_device.h> 31#include <scsi/scsi_device.h>
@@ -34,7 +36,7 @@
34 36
35#include "be.h" 37#include "be.h"
36#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
37#define BUILD_STR "4.2.162.0" 39#define BUILD_STR "4.4.58.0"
38#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
39 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
40#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -84,23 +86,7 @@
84#define MAX_CMD_SZ 65536 86#define MAX_CMD_SZ 65536
85#define IIOC_SCSI_DATA 0x05 /* Write Operation */ 87#define IIOC_SCSI_DATA 0x05 /* Write Operation */
86 88
87#define DBG_LVL 0x00000001 89#define INVALID_SESS_HANDLE 0xFFFFFFFF
88#define DBG_LVL_1 0x00000001
89#define DBG_LVL_2 0x00000002
90#define DBG_LVL_3 0x00000004
91#define DBG_LVL_4 0x00000008
92#define DBG_LVL_5 0x00000010
93#define DBG_LVL_6 0x00000020
94#define DBG_LVL_7 0x00000040
95#define DBG_LVL_8 0x00000080
96
97#define SE_DEBUG(debug_mask, fmt, args...) \
98do { \
99 if (debug_mask & DBG_LVL) { \
100 printk(KERN_ERR "(%s():%d):", __func__, __LINE__);\
101 printk(fmt, ##args); \
102 } \
103} while (0);
104 90
105#define BE_ADAPTER_UP 0x00000000 91#define BE_ADAPTER_UP 0x00000000
106#define BE_ADAPTER_LINK_DOWN 0x00000001 92#define BE_ADAPTER_LINK_DOWN 0x00000001
@@ -351,6 +337,8 @@ struct beiscsi_hba {
351 struct mgmt_session_info boot_sess; 337 struct mgmt_session_info boot_sess;
352 struct invalidate_command_table inv_tbl[128]; 338 struct invalidate_command_table inv_tbl[128];
353 339
340 unsigned int attr_log_enable;
341
354}; 342};
355 343
356struct beiscsi_session { 344struct beiscsi_session {
@@ -860,4 +848,20 @@ struct hwi_context_memory {
860 struct hwi_async_pdu_context *pasync_ctx; 848 struct hwi_async_pdu_context *pasync_ctx;
861}; 849};
862 850
851/* Logging related definitions */
852#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */
853#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */
854#define BEISCSI_LOG_MISC 0x0004 /* Miscellaneous Events */
855#define BEISCSI_LOG_EH 0x0008 /* Error Handler */
856#define BEISCSI_LOG_IO 0x0010 /* IO Code Path */
857#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
858
859#define beiscsi_log(phba, level, mask, fmt, arg...) \
860do { \
861 uint32_t log_value = phba->attr_log_enable; \
862 if (((mask) & log_value) || (level[1] <= '3')) \
863 shost_printk(level, phba->shost, \
864 fmt, __LINE__, ##arg); \
865} while (0)
866
863#endif 867#endif
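
For context on the beiscsi_log() macro above: attr_log_enable is a
per-adapter bitmask of the BEISCSI_LOG_* subsystem flags, so a message is
printed either when its subsystem bit is enabled or unconditionally when its
severity is KERN_ERR or worse - with the conventional printk level prefixes,
KERN_EMERG through KERN_ERR carry '0' through '3' as their second character,
which is what the level[1] <= '3' test keys on. The macro also appends
__LINE__ as the first variadic argument, which is what fills the leading
"BM_%d"/"BG_%d" in every message in this patch. A hedged usage sketch (the
function and message here are illustrative, not taken from the driver):

	/* Illustrative only: emit a CONFIG-path message through the new
	 * macro; __LINE__ fills BM_%d, cid fills the second %d.
	 */
	static void example_log_usage(struct beiscsi_hba *phba, int cid)
	{
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : freeing cid %d\n", cid);
	}
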
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 2a096795b9a..aab5dd359e2 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -23,6 +23,53 @@
23#include "be_mgmt.h" 23#include "be_mgmt.h"
24#include "be_iscsi.h" 24#include "be_iscsi.h"
25 25
26/**
27 * mgmt_reopen_session()- Reopen a session based on reopen_type
28 * @phba: Device priv structure instance
28 * @reopen_type: Type of reopen operation the FW should do.
30 * @sess_handle: Session Handle of the session to be re-opened
31 *
32 * return
33 * the TAG used for MBOX Command
34 *
35 **/
36unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
37 unsigned int reopen_type,
38 unsigned int sess_handle)
39{
40 struct be_ctrl_info *ctrl = &phba->ctrl;
41 struct be_mcc_wrb *wrb;
42 struct be_cmd_reopen_session_req *req;
43 unsigned int tag = 0;
44
45 beiscsi_log(phba, KERN_INFO,
46 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
47 "BG_%d : In bescsi_get_boot_target\n");
48
49 spin_lock(&ctrl->mbox_lock);
50 tag = alloc_mcc_tag(phba);
51 if (!tag) {
52 spin_unlock(&ctrl->mbox_lock);
53 return tag;
54 }
55
56 wrb = wrb_from_mccq(phba);
57 req = embedded_payload(wrb);
58 wrb->tag0 |= tag;
59 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
60 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
61 OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
62 sizeof(struct be_cmd_reopen_session_resp));
63
64 /* set the reopen_type, sess_handle */
65 req->reopen_type = reopen_type;
66 req->session_handle = sess_handle;
67
68 be_mcc_notify(phba);
69 spin_unlock(&ctrl->mbox_lock);
70 return tag;
71}
72
26unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba) 73unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
27{ 74{
28 struct be_ctrl_info *ctrl = &phba->ctrl; 75 struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -30,7 +77,10 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
30 struct be_cmd_get_boot_target_req *req; 77 struct be_cmd_get_boot_target_req *req;
31 unsigned int tag = 0; 78 unsigned int tag = 0;
32 79
33 SE_DEBUG(DBG_LVL_8, "In bescsi_get_boot_target\n"); 80 beiscsi_log(phba, KERN_INFO,
81 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
82 "BG_%d : In bescsi_get_boot_target\n");
83
34 spin_lock(&ctrl->mbox_lock); 84 spin_lock(&ctrl->mbox_lock);
35 tag = alloc_mcc_tag(phba); 85 tag = alloc_mcc_tag(phba);
36 if (!tag) { 86 if (!tag) {
@@ -62,7 +112,10 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
62 struct be_cmd_get_session_resp *resp; 112 struct be_cmd_get_session_resp *resp;
63 struct be_sge *sge; 113 struct be_sge *sge;
64 114
65 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_session_info\n"); 115 beiscsi_log(phba, KERN_INFO,
116 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
117 "BG_%d : In beiscsi_get_session_info\n");
118
66 spin_lock(&ctrl->mbox_lock); 119 spin_lock(&ctrl->mbox_lock);
67 tag = alloc_mcc_tag(phba); 120 tag = alloc_mcc_tag(phba);
68 if (!tag) { 121 if (!tag) {
@@ -121,16 +174,16 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
121 phba->fw_config.iscsi_cid_count = 174 phba->fw_config.iscsi_cid_count =
122 pfw_cfg->ulp[0].sq_count; 175 pfw_cfg->ulp[0].sq_count;
123 if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { 176 if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
124 SE_DEBUG(DBG_LVL_8, 177 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
125 "FW reported MAX CXNS as %d\t" 178 "BG_%d : FW reported MAX CXNS as %d\t"
126 "Max Supported = %d.\n", 179 "Max Supported = %d.\n",
127 phba->fw_config.iscsi_cid_count, 180 phba->fw_config.iscsi_cid_count,
128 BE2_MAX_SESSIONS); 181 BE2_MAX_SESSIONS);
129 phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; 182 phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
130 } 183 }
131 } else { 184 } else {
132 shost_printk(KERN_WARNING, phba->shost, 185 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
133 "Failed in mgmt_get_fw_config\n"); 186 "BG_%d : Failed in mgmt_get_fw_config\n");
134 } 187 }
135 188
136 spin_unlock(&ctrl->mbox_lock); 189 spin_unlock(&ctrl->mbox_lock);
@@ -150,9 +203,9 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
150 sizeof(struct be_mgmt_controller_attributes), 203 sizeof(struct be_mgmt_controller_attributes),
151 &nonemb_cmd.dma); 204 &nonemb_cmd.dma);
152 if (nonemb_cmd.va == NULL) { 205 if (nonemb_cmd.va == NULL) {
153 SE_DEBUG(DBG_LVL_1, 206 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
154 "Failed to allocate memory for mgmt_check_supported_fw" 207 "BG_%d : Failed to allocate memory for "
155 "\n"); 208 "mgmt_check_supported_fw\n");
156 return -ENOMEM; 209 return -ENOMEM;
157 } 210 }
158 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes); 211 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
@@ -169,18 +222,23 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
169 status = be_mbox_notify(ctrl); 222 status = be_mbox_notify(ctrl);
170 if (!status) { 223 if (!status) {
171 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va; 224 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
172 SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n", 225 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
173 resp->params.hba_attribs.flashrom_version_string); 226 "BG_%d : Firmware Version of CMD : %s\n"
174 SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n", 227 "Firmware Version is : %s\n"
175 resp->params.hba_attribs.firmware_version_string); 228 "Developer Build, not performing version check...\n",
176 SE_DEBUG(DBG_LVL_8, 229 resp->params.hba_attribs.
177 "Developer Build, not performing version check...\n"); 230 flashrom_version_string,
231 resp->params.hba_attribs.
232 firmware_version_string);
233
178 phba->fw_config.iscsi_features = 234 phba->fw_config.iscsi_features =
179 resp->params.hba_attribs.iscsi_features; 235 resp->params.hba_attribs.iscsi_features;
180 SE_DEBUG(DBG_LVL_8, " phba->fw_config.iscsi_features = %d\n", 236 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
181 phba->fw_config.iscsi_features); 237 "BG_%d : phba->fw_config.iscsi_features = %d\n",
238 phba->fw_config.iscsi_features);
182 } else 239 } else
183 SE_DEBUG(DBG_LVL_1, " Failed in mgmt_check_supported_fw\n"); 240 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
241 "BG_%d : Failed in mgmt_check_supported_fw\n");
184 spin_unlock(&ctrl->mbox_lock); 242 spin_unlock(&ctrl->mbox_lock);
185 if (nonemb_cmd.va) 243 if (nonemb_cmd.va)
186 pci_free_consistent(ctrl->pdev, nonemb_cmd.size, 244 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
@@ -229,9 +287,10 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
229 OPCODE_COMMON_READ_FLASH, sizeof(*req)); 287 OPCODE_COMMON_READ_FLASH, sizeof(*req));
230 break; 288 break;
231 default: 289 default:
232 shost_printk(KERN_WARNING, phba->shost, 290 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
233 "Unsupported cmd = 0x%x\n\n", bsg_req->rqst_data. 291 "BG_%d : Unsupported cmd = 0x%x\n\n",
234 h_vendor.vendor_cmd[0]); 292 bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
293
235 spin_unlock(&ctrl->mbox_lock); 294 spin_unlock(&ctrl->mbox_lock);
236 return -ENOSYS; 295 return -ENOSYS;
237 } 296 }
@@ -275,8 +334,8 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
275 334
276 status = be_mcc_notify_wait(phba); 335 status = be_mcc_notify_wait(phba);
277 if (status) 336 if (status)
278 shost_printk(KERN_WARNING, phba->shost, 337 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
279 " mgmt_epfw_cleanup , FAILED\n"); 338 "BG_%d : mgmt_epfw_cleanup , FAILED\n");
280 spin_unlock(&ctrl->mbox_lock); 339 spin_unlock(&ctrl->mbox_lock);
281 return status; 340 return status;
282} 341}
@@ -459,8 +518,9 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
459 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); 518 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
460 beiscsi_ep->ip_type = BE2_IPV6; 519 beiscsi_ep->ip_type = BE2_IPV6;
461 } else{ 520 } else{
462 shost_printk(KERN_ERR, phba->shost, "unknown addr family %d\n", 521 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
463 dst_addr->sa_family); 522 "BG_%d : unknown addr family %d\n",
523 dst_addr->sa_family);
464 spin_unlock(&ctrl->mbox_lock); 524 spin_unlock(&ctrl->mbox_lock);
465 free_mcc_tag(&phba->ctrl, tag); 525 free_mcc_tag(&phba->ctrl, tag);
466 return -EINVAL; 526 return -EINVAL;
@@ -471,7 +531,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
471 if (phba->nxt_cqid == phba->num_cpus) 531 if (phba->nxt_cqid == phba->num_cpus)
472 phba->nxt_cqid = 0; 532 phba->nxt_cqid = 0;
473 req->cq_id = phwi_context->be_cq[i].id; 533 req->cq_id = phwi_context->be_cq[i].id;
474 SE_DEBUG(DBG_LVL_8, "i=%d cq_id=%d\n", i, req->cq_id); 534 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
535 "BG_%d : i=%d cq_id=%d\n", i, req->cq_id);
475 req->defq_id = def_hdr_id; 536 req->defq_id = def_hdr_id;
476 req->hdr_ring_id = def_hdr_id; 537 req->hdr_ring_id = def_hdr_id;
477 req->data_ring_id = def_data_id; 538 req->data_ring_id = def_data_id;
@@ -506,8 +567,8 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
506 if (!status) 567 if (!status)
507 phba->interface_handle = pbe_allid->if_hndl_list[0]; 568 phba->interface_handle = pbe_allid->if_hndl_list[0];
508 else { 569 else {
509 shost_printk(KERN_WARNING, phba->shost, 570 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
510 "Failed in mgmt_get_all_if_id\n"); 571 "BG_%d : Failed in mgmt_get_all_if_id\n");
511 } 572 }
512 spin_unlock(&ctrl->mbox_lock); 573 spin_unlock(&ctrl->mbox_lock);
513 574
@@ -550,9 +611,10 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
550 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 611 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
551 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 612 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
552 if (status || extd_status) { 613 if (status || extd_status) {
553 SE_DEBUG(DBG_LVL_1, 614 beiscsi_log(phba, KERN_ERR,
554 "mgmt_exec_nonemb_cmd Failed status = %d" 615 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
555 "extd_status = %d\n", status, extd_status); 616 "BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
617 "extd_status = %d\n", status, extd_status);
556 rc = -EIO; 618 rc = -EIO;
557 goto free_tag; 619 goto free_tag;
558 } 620 }
@@ -573,7 +635,8 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
573{ 635{
574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma); 636 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
575 if (!cmd->va) { 637 if (!cmd->va) {
576 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n"); 638 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
639 "BG_%d : Failed to allocate memory for if info\n");
577 return -ENOMEM; 640 return -ENOMEM;
578 } 641 }
579 memset(cmd->va, 0, size); 642 memset(cmd->va, 0, size);
@@ -629,8 +692,8 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
629 692
630 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 693 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
631 if (rc < 0) 694 if (rc < 0)
632 shost_printk(KERN_WARNING, phba->shost, 695 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
633 "Failed to Modify existing IP Address\n"); 696 "BG_%d : Failed to Modify existing IP Address\n");
634 return rc; 697 return rc;
635} 698}
636 699
@@ -684,8 +747,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
684 747
685 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 748 if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
686 if (if_info.dhcp_state) { 749 if (if_info.dhcp_state) {
687 shost_printk(KERN_WARNING, phba->shost, 750 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
688 "DHCP Already Enabled\n"); 751 "BG_%d : DHCP Already Enabled\n");
689 return 0; 752 return 0;
690 } 753 }
691 /* The ip_param->len is 1 in DHCP case. Setting 754 /* The ip_param->len is 1 in DHCP case. Setting
@@ -712,8 +775,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
712 775
713 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 776 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
714 if (rc < 0) { 777 if (rc < 0) {
715 shost_printk(KERN_WARNING, phba->shost, 778 beiscsi_log(phba, KERN_WARNING,
716 "Failed to Delete existing dhcp\n"); 779 BEISCSI_LOG_CONFIG,
780 "BG_%d : Failed to Delete existing dhcp\n");
717 return rc; 781 return rc;
718 } 782 }
719 } 783 }
@@ -732,8 +796,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
732 memset(&gtway_addr_set, 0, sizeof(gtway_addr_set)); 796 memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
733 rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set); 797 rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
734 if (rc) { 798 if (rc) {
735 shost_printk(KERN_WARNING, phba->shost, 799 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
736 "Failed to Get Gateway Addr\n"); 800 "BG_%d : Failed to Get Gateway Addr\n");
737 return rc; 801 return rc;
738 } 802 }
739 803
@@ -743,8 +807,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
743 IP_ACTION_DEL, IP_V4_LEN); 807 IP_ACTION_DEL, IP_V4_LEN);
744 808
745 if (rc) { 809 if (rc) {
746 shost_printk(KERN_WARNING, phba->shost, 810 beiscsi_log(phba, KERN_WARNING,
747 "Failed to clear Gateway Addr Set\n"); 811 BEISCSI_LOG_CONFIG,
812 "BG_%d : Failed to clear Gateway Addr Set\n");
748 return rc; 813 return rc;
749 } 814 }
750 } 815 }
@@ -783,8 +848,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
783 memset(&gtway_addr_set, 0, sizeof(gtway_addr_set)); 848 memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
784 rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set); 849 rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
785 if (rt_val) { 850 if (rt_val) {
786 shost_printk(KERN_WARNING, phba->shost, 851 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
787 "Failed to Get Gateway Addr\n"); 852 "BG_%d : Failed to Get Gateway Addr\n");
788 return rt_val; 853 return rt_val;
789 } 854 }
790 855
@@ -793,8 +858,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
793 rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL, 858 rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
794 gateway_param->len); 859 gateway_param->len);
795 if (rt_val) { 860 if (rt_val) {
796 shost_printk(KERN_WARNING, phba->shost, 861 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
797 "Failed to clear Gateway Addr Set\n"); 862 "BG_%d : Failed to clear Gateway Addr Set\n");
798 return rt_val; 863 return rt_val;
799 } 864 }
800 } 865 }
@@ -804,8 +869,8 @@ int mgmt_set_gateway(struct beiscsi_hba *phba,
804 gateway_param->len); 869 gateway_param->len);
805 870
806 if (rt_val) 871 if (rt_val)
807 shost_printk(KERN_WARNING, phba->shost, 872 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
808 "Failed to Set Gateway Addr\n"); 873 "BG_%d : Failed to Set Gateway Addr\n");
809 874
810 return rt_val; 875 return rt_val;
811} 876}
@@ -924,3 +989,150 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
924 spin_unlock(&ctrl->mbox_lock); 989 spin_unlock(&ctrl->mbox_lock);
925 return tag; 990 return tag;
926} 991}
992
993/**
994 * be_mgmt_get_boot_shandle()- Get the session handle
995 * @phba: device priv structure instance
996 * @s_handle: session handle returned for boot session.
997 *
998 * Get the boot target session handle. In case of
999 * crashdump mode the driver has to issue an MBX Cmd
1000 * for FW to login to boot target
1001 *
1002 * return
1003 * Success: 0
1004 * Failure: Non-Zero value
1005 *
1006 **/
1007int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
1008 unsigned int *s_handle)
1009{
1010 struct be_cmd_get_boot_target_resp *boot_resp;
1011 struct be_mcc_wrb *wrb;
1012 unsigned int tag, wrb_num;
1013 uint8_t boot_retry = 3;
1014 unsigned short status, extd_status;
1015 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
1016
1017 do {
1018 /* Get the Boot Target Session Handle and Count */
1019 tag = mgmt_get_boot_target(phba);
1020 if (!tag) {
1021 beiscsi_log(phba, KERN_ERR,
1022 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
1023 "BG_%d : Getting Boot Target Info Failed\n");
1024 return -EAGAIN;
1025 } else
1026 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
1027 phba->ctrl.mcc_numtag[tag]);
1028
1029 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
1030 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
1031 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
1032 if (status || extd_status) {
1033 beiscsi_log(phba, KERN_ERR,
1034 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1035 "BG_%d : mgmt_get_boot_target Failed"
1036 " status = %d extd_status = %d\n",
1037 status, extd_status);
1038 free_mcc_tag(&phba->ctrl, tag);
1039 return -EBUSY;
1040 }
1041 wrb = queue_get_wrb(mccq, wrb_num);
1042 free_mcc_tag(&phba->ctrl, tag);
1043 boot_resp = embedded_payload(wrb);
1044
1045 /* Check if there are any boot targets configured */
1046 if (!boot_resp->boot_session_count) {
1047 beiscsi_log(phba, KERN_INFO,
1048 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1049 "BG_%d ;No boot targets configured\n");
1050 return -ENXIO;
1051 }
1052
1053 /* FW returns the session handle of the boot session */
1054 if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
1055 *s_handle = boot_resp->boot_session_handle;
1056 return 0;
1057 }
1058
1059 /* Issue MBX Cmd to FW to login to the boot target */
1060 tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
1061 INVALID_SESS_HANDLE);
1062 if (!tag) {
1063 beiscsi_log(phba, KERN_ERR,
1064 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1065 "BG_%d : mgmt_reopen_session Failed\n");
1066 return -EAGAIN;
1067 } else
1068 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
1069 phba->ctrl.mcc_numtag[tag]);
1070
1071 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
1072 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
1073 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
1074 if (status || extd_status) {
1075 beiscsi_log(phba, KERN_ERR,
1076 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1077 "BG_%d : mgmt_reopen_session Failed"
1078 " status = %d extd_status = %d\n",
1079 status, extd_status);
1080 free_mcc_tag(&phba->ctrl, tag);
1081 return -EBUSY;
1082 }
1083 free_mcc_tag(&phba->ctrl, tag);
1084
1085 } while (--boot_retry);
1086
1087 /* Couldn't log into the boot target */
1088 beiscsi_log(phba, KERN_ERR,
1089 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1090 "BG_%d : Login to Boot Target Failed\n");
1091 return -ENXIO;
1092}
1093
1094/**
1095 * mgmt_set_vlan()- Issue and wait for CMD completion
1096 * @phba: device private structure instance
1097 * @vlan_tag: VLAN tag
1098 *
1099 * Issue the MBX Cmd and wait for the completion of the
1100 * command.
1101 *
1102 * returns
1103 * Success: 0
1104 * Failure: Non-Zero Value
1105 **/
1106int mgmt_set_vlan(struct beiscsi_hba *phba,
1107 uint16_t vlan_tag)
1108{
1109 unsigned int tag, wrb_num;
1110 unsigned short status, extd_status;
1111
1112 tag = be_cmd_set_vlan(phba, vlan_tag);
1113 if (!tag) {
1114 beiscsi_log(phba, KERN_ERR,
1115 (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
1116 "BG_%d : VLAN Setting Failed\n");
1117 return -EBUSY;
1118 } else
1119 wait_event_interruptible(phba->ctrl.mcc_wait[tag],
1120 phba->ctrl.mcc_numtag[tag]);
1121
1122 wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
1123 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
1124 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
1125
1126 if (status || extd_status) {
1127 beiscsi_log(phba, KERN_ERR,
1128 (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
1129 "BS_%d : status : %d extd_status : %d\n",
1130 status, extd_status);
1131
1132 free_mcc_tag(&phba->ctrl, tag);
1133 return -EAGAIN;
1134 }
1135
1136 free_mcc_tag(&phba->ctrl, tag);
1137 return 0;
1138}
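
mgmt_set_vlan() above is a thin synchronous wrapper: it issues
be_cmd_set_vlan() and then sleeps on the MCC wait queue until the completion
word for the tag is posted, returning -EBUSY when no tag could be allocated
and -EAGAIN on any non-zero status. A caller sketch under that assumption
(example_set_vlan is a hypothetical name, not part of this patch):

	/* Illustrative caller: push a VLAN tag down to the FW and log
	 * any failure on the CONFIG path.
	 */
	static int example_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
	{
		int ret;

		ret = mgmt_set_vlan(phba, vlan_tag);
		if (ret)
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : Failed to set VLAN tag %u\n",
				    vlan_tag);
		return ret;
	}
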
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 5c2e37693ca..c50cef6fec0 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -108,6 +108,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
108 struct bsg_job *job, 108 struct bsg_job *job,
109 struct be_dma_mem *nonemb_cmd); 109 struct be_dma_mem *nonemb_cmd);
110 110
111#define BEISCSI_NO_RST_ISSUE 0
111struct iscsi_invalidate_connection_params_in { 112struct iscsi_invalidate_connection_params_in {
112 struct be_cmd_req_hdr hdr; 113 struct be_cmd_req_hdr hdr;
113 unsigned int session_handle; 114 unsigned int session_handle;
@@ -274,6 +275,10 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
274 275
275unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba); 276unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
276 277
278unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
279 unsigned int reopen_type,
280 unsigned int sess_handle);
281
277unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, 282unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
278 u32 boot_session_handle, 283 u32 boot_session_handle,
279 struct be_dma_mem *nonemb_cmd); 284 struct be_dma_mem *nonemb_cmd);
@@ -290,4 +295,10 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
290int mgmt_set_gateway(struct beiscsi_hba *phba, 295int mgmt_set_gateway(struct beiscsi_hba *phba,
291 struct iscsi_iface_param_info *gateway_param); 296 struct iscsi_iface_param_info *gateway_param);
292 297
298int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
299 unsigned int *s_handle);
300
301unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
302
303int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
293#endif 304#endif
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 456e5762977..b7c326f7a6d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -775,7 +775,8 @@ bfa_intx(struct bfa_s *bfa)
775 if (!intr) 775 if (!intr)
776 return BFA_TRUE; 776 return BFA_TRUE;
777 777
778 bfa_msix_lpu_err(bfa, intr); 778 if (bfa->intr_enabled)
779 bfa_msix_lpu_err(bfa, intr);
779 780
780 return BFA_TRUE; 781 return BFA_TRUE;
781} 782}
@@ -803,11 +804,17 @@ bfa_isr_enable(struct bfa_s *bfa)
803 writel(~umsk, bfa->iocfc.bfa_regs.intr_mask); 804 writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
804 bfa->iocfc.intr_mask = ~umsk; 805 bfa->iocfc.intr_mask = ~umsk;
805 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); 806 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
807
808 /*
809 * Set the flag indicating successful enabling of interrupts
810 */
811 bfa->intr_enabled = BFA_TRUE;
806} 812}
807 813
808void 814void
809bfa_isr_disable(struct bfa_s *bfa) 815bfa_isr_disable(struct bfa_s *bfa)
810{ 816{
817 bfa->intr_enabled = BFA_FALSE;
811 bfa_isr_mode_set(bfa, BFA_FALSE); 818 bfa_isr_mode_set(bfa, BFA_FALSE);
812 writel(-1L, bfa->iocfc.bfa_regs.intr_mask); 819 writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
813 bfa_msix_uninstall(bfa); 820 bfa_msix_uninstall(bfa);
@@ -1022,7 +1029,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
1022{ 1029{
1023 u8 *dm_kva = NULL; 1030 u8 *dm_kva = NULL;
1024 u64 dm_pa = 0; 1031 u64 dm_pa = 0;
1025 int i, per_reqq_sz, per_rspq_sz, dbgsz; 1032 int i, per_reqq_sz, per_rspq_sz;
1026 struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1033 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1027 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa); 1034 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
1028 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa); 1035 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
@@ -1083,11 +1090,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
1083 BFA_CACHELINE_SZ); 1090 BFA_CACHELINE_SZ);
1084 1091
1085 /* Claim IOCFC kva memory */ 1092 /* Claim IOCFC kva memory */
1086 dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; 1093 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
1087 if (dbgsz > 0) { 1094 bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
1088 bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
1089 bfa_mem_kva_curp(iocfc) += dbgsz;
1090 }
1091} 1095}
1092 1096
1093/* 1097/*
@@ -1429,8 +1433,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1429 bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len); 1433 bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
1430 1434
1431 /* kva memory setup for IOCFC */ 1435 /* kva memory setup for IOCFC */
1432 bfa_mem_kva_setup(meminfo, iocfc_kva, 1436 bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
1433 ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
1434} 1437}
1435 1438
1436/* 1439/*
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 12bfeed268e..91a8aa394db 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -168,7 +168,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
168/* 168/*
169 * bfa_q_deq - dequeue an element from head of the queue 169 * bfa_q_deq - dequeue an element from head of the queue
170 */ 170 */
171#define bfa_q_deq(_q, _qe) { \ 171#define bfa_q_deq(_q, _qe) do { \
172 if (!list_empty(_q)) { \ 172 if (!list_empty(_q)) { \
173 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \ 173 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
174 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ 174 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
@@ -177,7 +177,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
177 } else { \ 177 } else { \
178 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ 178 *((struct list_head **) (_qe)) = (struct list_head *) NULL;\
179 } \ 179 } \
180} 180} while (0)
181 181
182/* 182/*
183 * bfa_q_deq_tail - dequeue an element from tail of the queue 183 * bfa_q_deq_tail - dequeue an element from tail of the queue
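
The bfa_q_deq change above is the standard macro-hygiene fix: wrapping a
multi-statement macro body in do { ... } while (0) makes the expansion a
single statement, so the semicolon a caller writes after the macro cannot
detach a following else clause. A minimal illustration (the macro and
function names are invented for this sketch):

	/* A no-op stand-in with the same do { ... } while (0) shape. */
	#define DEQ_DOWHILE(_q, _qe)	do { (void)(_q); (void)(_qe); } while (0)

	static void example(int cond, void *q, void *qe)
	{
		if (cond)
			DEQ_DOWHILE(q, qe);	/* one statement; the ';' binds here */
		else
			qe = q;
		/*
		 * Had the macro been a bare { ... } block, the ';' after it
		 * would be a second, empty statement and the 'else' above
		 * would no longer attach - a compile error.
		 */
	}
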
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 3bbc583f65c..06f0a163ca3 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -93,6 +93,7 @@ struct bfa_lport_cfg_s {
93 wwn_t pwwn; /* port wwn */ 93 wwn_t pwwn; /* port wwn */
94 wwn_t nwwn; /* node wwn */ 94 wwn_t nwwn; /* node wwn */
95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ 95 struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
96 struct bfa_lport_symname_s node_sym_name; /* Node symbolic name */
96 enum bfa_lport_role roles; /* FCS port roles */ 97 enum bfa_lport_role roles; /* FCS port roles */
97 u32 rsvd; 98 u32 rsvd;
98 bfa_boolean_t preboot_vp; /* vport created from PBC */ 99 bfa_boolean_t preboot_vp; /* vport created from PBC */
@@ -192,6 +193,18 @@ struct bfa_lport_stats_s {
192 u32 ns_gidft_unknown_rsp; 193 u32 ns_gidft_unknown_rsp;
193 u32 ns_gidft_alloc_wait; 194 u32 ns_gidft_alloc_wait;
194 195
196 u32 ns_rnnid_sent;
197 u32 ns_rnnid_accepts;
198 u32 ns_rnnid_rsp_err;
199 u32 ns_rnnid_rejects;
200 u32 ns_rnnid_alloc_wait;
201
202 u32 ns_rsnn_nn_sent;
203 u32 ns_rsnn_nn_accepts;
204 u32 ns_rsnn_nn_rsp_err;
205 u32 ns_rsnn_nn_rejects;
206 u32 ns_rsnn_nn_alloc_wait;
207
195 /* 208 /*
196 * Mgmt Server stats 209 * Mgmt Server stats
197 */ 210 */
@@ -410,6 +423,11 @@ struct bfa_rport_remote_link_stats_s {
410 u32 icc; /* Invalid CRC Count */ 423 u32 icc; /* Invalid CRC Count */
411}; 424};
412 425
426struct bfa_rport_qualifier_s {
427 wwn_t pwwn; /* Port WWN */
428 u32 pid; /* port ID */
429 u32 rsvd;
430};
413 431
414#define BFA_MAX_IO_INDEX 7 432#define BFA_MAX_IO_INDEX 7
415#define BFA_NO_IO_INDEX 9 433#define BFA_NO_IO_INDEX 9
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 8d0b88f67a3..e0beb4d7e26 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1279,6 +1279,7 @@ enum {
1279 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */ 1279 GS_GSPN_ID = 0x0118, /* Get symbolic PN on ID */
1280 GS_RFT_ID = 0x0217, /* Register fc4type on ID */ 1280 GS_RFT_ID = 0x0217, /* Register fc4type on ID */
1281 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */ 1281 GS_RSPN_ID = 0x0218, /* Register symbolic PN on ID */
1282 GS_RSNN_NN = 0x0239, /* Register symbolic NN on NN */
1282 GS_RPN_ID = 0x0212, /* Register port name */ 1283 GS_RPN_ID = 0x0212, /* Register port name */
1283 GS_RNN_ID = 0x0213, /* Register node name */ 1284 GS_RNN_ID = 0x0213, /* Register node name */
1284 GS_RCS_ID = 0x0214, /* Register class of service */ 1285 GS_RCS_ID = 0x0214, /* Register class of service */
@@ -1357,6 +1358,15 @@ struct fcgs_rspnid_req_s {
1357}; 1358};
1358 1359
1359/* 1360/*
1361 * RSNN_NN
1362 */
1363struct fcgs_rsnn_nn_req_s {
1364 wwn_t node_name; /* Node name */
1365 u8 snn_len; /* symbolic node name length */
1366 u8 snn[256]; /* symbolic node name */
1367};
1368
1369/*
1360 * RPN_ID 1370 * RPN_ID
1361 */ 1371 */
1362struct fcgs_rpnid_req_s { 1372struct fcgs_rpnid_req_s {
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 17b59b8b564..273cee90b3b 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1252,6 +1252,27 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
1252} 1252}
1253 1253
1254u16 1254u16
1255fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
1256 wwn_t node_name, u8 *name)
1257{
1258 struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
1259 struct fcgs_rsnn_nn_req_s *rsnn_nn =
1260 (struct fcgs_rsnn_nn_req_s *) (cthdr + 1);
1261 u32 d_id = bfa_hton3b(FC_NAME_SERVER);
1262
1263 fc_gs_fchdr_build(fchs, d_id, s_id, 0);
1264 fc_gs_cthdr_build(cthdr, s_id, GS_RSNN_NN);
1265
1266 memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
1267
1268 rsnn_nn->node_name = node_name;
1269 rsnn_nn->snn_len = (u8) strlen((char *)name);
1270 strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
1271
1272 return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
1273}
1274
1275u16
1255fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) 1276fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
1256{ 1277{
1257 1278
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index 42cd9d4da69..03c753d1e54 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -166,6 +166,8 @@ enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
166 166
167u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id, 167u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
168 u16 ox_id, u8 *name); 168 u16 ox_id, u8 *name);
169u16 fc_rsnn_nn_build(struct fchs_s *fchs, void *pld, u32 s_id,
170 wwn_t node_name, u8 *name);
169 171
170u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id, 172u16 fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
171 u16 ox_id, enum bfa_lport_role role); 173 u16 ox_id, enum bfa_lport_role role);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index f0f80e282e3..1633963c66c 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1466,7 +1466,13 @@ bfa_status_t
1466bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, 1466bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1467 struct bfa_itnim_ioprofile_s *ioprofile) 1467 struct bfa_itnim_ioprofile_s *ioprofile)
1468{ 1468{
1469 struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa); 1469 struct bfa_fcpim_s *fcpim;
1470
1471 if (!itnim)
1472 return BFA_STATUS_NO_FCPIM_NEXUS;
1473
1474 fcpim = BFA_FCPIM(itnim->bfa);
1475
1470 if (!fcpim->io_profile) 1476 if (!fcpim->io_profile)
1471 return BFA_STATUS_IOPROFILE_OFF; 1477 return BFA_STATUS_IOPROFILE_OFF;
1472 1478
@@ -1484,6 +1490,10 @@ void
1484bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) 1490bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1485{ 1491{
1486 int j; 1492 int j;
1493
1494 if (!itnim)
1495 return;
1496
1487 memset(&itnim->stats, 0, sizeof(itnim->stats)); 1497 memset(&itnim->stats, 0, sizeof(itnim->stats));
1488 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); 1498 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
1489 for (j = 0; j < BFA_IOBUCKET_MAX; j++) 1499 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index eaac57e1dde..fd3e84d32bd 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -76,6 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
76 fcs->bfa = bfa; 76 fcs->bfa = bfa;
77 fcs->bfad = bfad; 77 fcs->bfad = bfad;
78 fcs->min_cfg = min_cfg; 78 fcs->min_cfg = min_cfg;
79 fcs->num_rport_logins = 0;
79 80
80 bfa->fcs = BFA_TRUE; 81 bfa->fcs = BFA_TRUE;
81 fcbuild_init(); 82 fcbuild_init();
@@ -119,6 +120,18 @@ bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
119} 120}
120 121
121/* 122/*
123 * Stop FCS operations.
124 */
125void
126bfa_fcs_stop(struct bfa_fcs_s *fcs)
127{
128 bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
129 bfa_wc_up(&fcs->wc);
130 bfa_fcs_fabric_modstop(fcs);
131 bfa_wc_wait(&fcs->wc);
132}
133
134/*
122 * fcs pbc vport initialization 135 * fcs pbc vport initialization
123 */ 136 */
124void 137void
@@ -153,6 +166,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
153 fcs->driver_info = *driver_info; 166 fcs->driver_info = *driver_info;
154 167
155 bfa_fcs_fabric_psymb_init(&fcs->fabric); 168 bfa_fcs_fabric_psymb_init(&fcs->fabric);
169 bfa_fcs_fabric_nsymb_init(&fcs->fabric);
156} 170}
157 171
158/* 172/*
@@ -213,6 +227,8 @@ static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
213static void bfa_fcs_fabric_delay(void *cbarg); 227static void bfa_fcs_fabric_delay(void *cbarg);
214static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric); 228static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
215static void bfa_fcs_fabric_delete_comp(void *cbarg); 229static void bfa_fcs_fabric_delete_comp(void *cbarg);
230static void bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric);
231static void bfa_fcs_fabric_stop_comp(void *cbarg);
216static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, 232static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
217 struct fchs_s *fchs, u16 len); 233 struct fchs_s *fchs, u16 len);
218static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, 234static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
@@ -250,6 +266,10 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
250 enum bfa_fcs_fabric_event event); 266 enum bfa_fcs_fabric_event event);
251static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, 267static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
252 enum bfa_fcs_fabric_event event); 268 enum bfa_fcs_fabric_event event);
269static void bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
270 enum bfa_fcs_fabric_event event);
271static void bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
272 enum bfa_fcs_fabric_event event);
253/* 273/*
254 * Beginning state before fabric creation. 274 * Beginning state before fabric creation.
255 */ 275 */
@@ -334,6 +354,11 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
334 bfa_fcs_fabric_delete(fabric); 354 bfa_fcs_fabric_delete(fabric);
335 break; 355 break;
336 356
357 case BFA_FCS_FABRIC_SM_STOP:
358 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
359 bfa_fcs_fabric_stop(fabric);
360 break;
361
337 default: 362 default:
338 bfa_sm_fault(fabric->fcs, event); 363 bfa_sm_fault(fabric->fcs, event);
339 } 364 }
@@ -585,6 +610,11 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
585 bfa_fcs_fabric_delete(fabric); 610 bfa_fcs_fabric_delete(fabric);
586 break; 611 break;
587 612
613 case BFA_FCS_FABRIC_SM_STOP:
614 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_stopping);
615 bfa_fcs_fabric_stop(fabric);
616 break;
617
588 case BFA_FCS_FABRIC_SM_AUTH_FAILED: 618 case BFA_FCS_FABRIC_SM_AUTH_FAILED:
589 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); 619 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
590 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); 620 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE);
@@ -682,7 +712,62 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
682 } 712 }
683} 713}
684 714
715/*
716 * Fabric is being stopped, awaiting vport stop completions.
717 */
718static void
719bfa_fcs_fabric_sm_stopping(struct bfa_fcs_fabric_s *fabric,
720 enum bfa_fcs_fabric_event event)
721{
722 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
723 bfa_trc(fabric->fcs, event);
724
725 switch (event) {
726 case BFA_FCS_FABRIC_SM_STOPCOMP:
727 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
728 bfa_sm_send_event(fabric->lps, BFA_LPS_SM_LOGOUT);
729 break;
730
731 case BFA_FCS_FABRIC_SM_LINK_UP:
732 break;
733
734 case BFA_FCS_FABRIC_SM_LINK_DOWN:
735 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_cleanup);
736 break;
737
738 default:
739 bfa_sm_fault(fabric->fcs, event);
740 }
741}
742
743/*
744 * Fabric is being stopped, cleanup without FLOGO
745 */
746static void
747bfa_fcs_fabric_sm_cleanup(struct bfa_fcs_fabric_s *fabric,
748 enum bfa_fcs_fabric_event event)
749{
750 bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
751 bfa_trc(fabric->fcs, event);
685 752
753 switch (event) {
754 case BFA_FCS_FABRIC_SM_STOPCOMP:
755 case BFA_FCS_FABRIC_SM_LOGOCOMP:
756 bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
757 bfa_wc_down(&(fabric->fcs)->wc);
758 break;
759
760 case BFA_FCS_FABRIC_SM_LINK_DOWN:
761 /*
762 * Ignore - can get this event if we get notified about IOC down
 763 * before the fabric completion callback is done.
764 */
765 break;
766
767 default:
768 bfa_sm_fault(fabric->fcs, event);
769 }
770}
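
The two new handlers use the driver's uniform state-machine idiom: a state is a function pointer stored in the object, bfa_sm_set_state() swaps it, and bfa_sm_send_event() invokes the current state with the event. A reduced sketch of that dispatch model, with fab_*/EV_*/st_* names invented for illustration:

/* Function-pointer state machine in the style of the bfa_sm_* macros. */
enum fab_event { EV_STOP, EV_STOPCOMP };

struct fab;
typedef void (*fab_state_t)(struct fab *f, enum fab_event ev);
struct fab { fab_state_t sm; };

#define sm_set_state(f, st)	((f)->sm = (st))
#define sm_send_event(f, ev)	((f)->sm((f), (ev)))

static void st_created(struct fab *f, enum fab_event ev);
static void st_stopping(struct fab *f, enum fab_event ev);

static void st_created(struct fab *f, enum fab_event ev)
{
	if (ev == EV_STOP)
		sm_set_state(f, st_stopping);
}

static void st_stopping(struct fab *f, enum fab_event ev)
{
	switch (ev) {
	case EV_STOPCOMP:
		sm_set_state(f, st_created);
		break;
	default:
		break;	/* the real handlers call bfa_sm_fault() here */
	}
}

int main(void)
{
	struct fab f = { st_created };

	sm_send_event(&f, EV_STOP);	/* created -> stopping */
	sm_send_event(&f, EV_STOPCOMP);	/* stopping -> created */
	return 0;
}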
686 771
687/* 772/*
688 * fcs_fabric_private fabric private functions 773 * fcs_fabric_private fabric private functions
@@ -760,6 +845,44 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
760} 845}
761 846
762/* 847/*
848 * Node Symbolic Name Creation for base port and all vports
849 */
850void
851bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
852{
853 struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
854 char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
855 struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
856
857 bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
858
859 /* Model name/number */
860 strncpy((char *)&port_cfg->node_sym_name, model,
861 BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
862 strncat((char *)&port_cfg->node_sym_name,
863 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
864 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
865
866 /* Driver Version */
867 strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
868 BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
869 strncat((char *)&port_cfg->node_sym_name,
870 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
871 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
872
873 /* Host machine name */
874 strncat((char *)&port_cfg->node_sym_name,
875 (char *)driver_info->host_machine_name,
876 BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
877 strncat((char *)&port_cfg->node_sym_name,
878 BFA_FCS_PORT_SYMBNAME_SEPARATOR,
879 sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
880
881 /* null terminate */
882 port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
883}
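
One subtlety in the strncat() calls above: sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR) counts the string literal's terminating NUL, so the bound is one larger than the separator text, and the trailing explicit NUL covers the case where strncpy() fills its bound without terminating. A freestanding sketch with a made-up separator, buffer size, and sample strings:

/* Illustrative rebuild of the node-symbolic-name assembly; SEP and
 * the sizes here are stand-ins, not the driver's constants. */
#include <string.h>
#include <stdio.h>

#define SEP " | "

int main(void)
{
	char sym[64];
	const char *model = "BR-1860";	/* hypothetical adapter model */
	const char *ver = "3.2.23.0";
	const char *host = "host1";

	strncpy(sym, model, sizeof(sym) - 1);
	sym[sizeof(sym) - 1] = '\0';
	strncat(sym, SEP, sizeof(SEP));	/* sizeof counts the literal's NUL */
	strncat(sym, ver, 16);
	strncat(sym, SEP, sizeof(SEP));
	strncat(sym, host, 16);
	sym[sizeof(sym) - 1] = '\0';	/* explicit terminator, as in the driver */

	printf("%s\n", sym);		/* BR-1860 | 3.2.23.0 | host1 */
	return 0;
}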
884
885/*
763 * bfa lps login completion callback 886 * bfa lps login completion callback
764 */ 887 */
765void 888void
@@ -919,6 +1042,28 @@ bfa_fcs_fabric_delay(void *cbarg)
919} 1042}
920 1043
921/* 1044/*
1045 * Stop all vports and wait for vport stop completions.
1046 */
1047static void
1048bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric)
1049{
1050 struct bfa_fcs_vport_s *vport;
1051 struct list_head *qe, *qen;
1052
1053 bfa_wc_init(&fabric->stop_wc, bfa_fcs_fabric_stop_comp, fabric);
1054
1055 list_for_each_safe(qe, qen, &fabric->vport_q) {
1056 vport = (struct bfa_fcs_vport_s *) qe;
1057 bfa_wc_up(&fabric->stop_wc);
1058 bfa_fcs_vport_fcs_stop(vport);
1059 }
1060
1061 bfa_wc_up(&fabric->stop_wc);
1062 bfa_fcs_lport_stop(&fabric->bport);
1063 bfa_wc_wait(&fabric->stop_wc);
1064}
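
bfa_fcs_fabric_stop() iterates with list_for_each_safe() because a vport may be unlinked from vport_q while its stop is processed; the _safe variant caches the next pointer before the body runs. It also takes the extra bfa_wc_up() for the base port before bfa_wc_wait(), so the completion cannot fire while the loop is still queuing stops. A cut-down sketch of the safe-iteration half (minimal list helpers, illustrative only):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define list_for_each_safe(pos, n, head) \
	for ((pos) = (head)->next, (n) = (pos)->next; \
	     (pos) != (head); \
	     (pos) = (n), (n) = (pos)->next)

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

int main(void)
{
	struct list_head head, a, b, *qe, *qen;
	int stopped = 0;

	list_init(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);

	list_for_each_safe(qe, qen, &head) {
		list_del(qe);	/* safe: qen was saved before unlinking */
		stopped++;
	}
	printf("%d entries stopped\n", stopped);	/* 2 */
	return 0;
}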
1065
1066/*
922 * Computes operating BB_SCN value 1067 * Computes operating BB_SCN value
923 */ 1068 */
924static u8 1069static u8
@@ -978,6 +1123,14 @@ bfa_fcs_fabric_delete_comp(void *cbarg)
978 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); 1123 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
979} 1124}
980 1125
1126static void
1127bfa_fcs_fabric_stop_comp(void *cbarg)
1128{
1129 struct bfa_fcs_fabric_s *fabric = cbarg;
1130
1131 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOPCOMP);
1132}
1133
981/* 1134/*
982 * fcs_fabric_public fabric public functions 1135 * fcs_fabric_public fabric public functions
983 */ 1136 */
@@ -1039,6 +1192,19 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
1039} 1192}
1040 1193
1041/* 1194/*
1195 * Fabric module stop -- stop FCS actions
1196 */
1197void
1198bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs)
1199{
1200 struct bfa_fcs_fabric_s *fabric;
1201
1202 bfa_trc(fcs, 0);
1203 fabric = &fcs->fabric;
1204 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_STOP);
1205}
1206
1207/*
1042 * Fabric module start -- kick starts FCS actions 1208 * Fabric module start -- kick starts FCS actions
1043 */ 1209 */
1044void 1210void
@@ -1219,8 +1385,11 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
1219 return; 1385 return;
1220 } 1386 }
1221 } 1387 }
1222 bfa_trc(fabric->fcs, els_cmd->els_code); 1388
1223 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); 1389 if (!bfa_fcs_fabric_is_switched(fabric))
1390 bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
1391
1392 bfa_trc(fabric->fcs, fchs->type);
1224} 1393}
1225 1394
1226/* 1395/*
@@ -1294,7 +1463,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
1294 u16 reqlen; 1463 u16 reqlen;
1295 struct fchs_s fchs; 1464 struct fchs_s fchs;
1296 1465
1297 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); 1466 fcxp = bfa_fcs_fcxp_alloc(fabric->fcs, BFA_FALSE);
1298 /* 1467 /*
1299 * Do not expect this failure -- expect remote node to retry 1468 * Do not expect this failure -- expect remote node to retry
1300 */ 1469 */
@@ -1387,6 +1556,13 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
1387 } 1556 }
1388} 1557}
1389 1558
1559void
1560bfa_cb_lps_flogo_comp(void *bfad, void *uarg)
1561{
1562 struct bfa_fcs_fabric_s *fabric = uarg;
1563 bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOGOCOMP);
1564}
1565
1390/* 1566/*
1391 * Returns FCS vf structure for a given vf_id. 1567 * Returns FCS vf structure for a given vf_id.
1392 * 1568 *
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 51c9e134571..6c4377cb287 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -62,9 +62,9 @@ struct bfa_fcs_s;
62#define N2N_LOCAL_PID 0x010000 62#define N2N_LOCAL_PID 0x010000
63#define N2N_REMOTE_PID 0x020000 63#define N2N_REMOTE_PID 0x020000
64#define BFA_FCS_RETRY_TIMEOUT 2000 64#define BFA_FCS_RETRY_TIMEOUT 2000
65#define BFA_FCS_MAX_NS_RETRIES 5
65#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0) 66#define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0)
66 67#define BFA_FCS_MAX_RPORT_LOGINS 1024
67
68 68
69struct bfa_fcs_lport_ns_s { 69struct bfa_fcs_lport_ns_s {
70 bfa_sm_t sm; /* state machine */ 70 bfa_sm_t sm; /* state machine */
@@ -72,6 +72,8 @@ struct bfa_fcs_lport_ns_s {
72 struct bfa_fcs_lport_s *port; /* parent port */ 72 struct bfa_fcs_lport_s *port; /* parent port */
73 struct bfa_fcxp_s *fcxp; 73 struct bfa_fcxp_s *fcxp;
74 struct bfa_fcxp_wqe_s fcxp_wqe; 74 struct bfa_fcxp_wqe_s fcxp_wqe;
75 u8 num_rnnid_retries;
76 u8 num_rsnn_nn_retries;
75}; 77};
76 78
77 79
@@ -205,6 +207,7 @@ struct bfa_fcs_fabric_s {
205 struct bfa_lps_s *lps; /* lport login services */ 207 struct bfa_lps_s *lps; /* lport login services */
206 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; 208 u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
207 /* attached fabric's ip addr */ 209 /* attached fabric's ip addr */
210 struct bfa_wc_s stop_wc; /* wait counter for stop */
208}; 211};
209 212
210#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv) 213#define bfa_fcs_fabric_npiv_capable(__f) ((__f)->is_npiv)
@@ -264,6 +267,7 @@ struct bfa_fcs_fabric_s;
264#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn) 267#define bfa_fcs_lport_get_pwwn(_lport) ((_lport)->port_cfg.pwwn)
265#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn) 268#define bfa_fcs_lport_get_nwwn(_lport) ((_lport)->port_cfg.nwwn)
266#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name) 269#define bfa_fcs_lport_get_psym_name(_lport) ((_lport)->port_cfg.sym_name)
270#define bfa_fcs_lport_get_nsym_name(_lport) ((_lport)->port_cfg.node_sym_name)
267#define bfa_fcs_lport_is_initiator(_lport) \ 271#define bfa_fcs_lport_is_initiator(_lport) \
268 ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM) 272 ((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
269#define bfa_fcs_lport_get_nrports(_lport) \ 273#define bfa_fcs_lport_get_nrports(_lport) \
@@ -286,9 +290,8 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
286 290
287bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port); 291bfa_boolean_t bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
288struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs); 292struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
289void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port, 293void bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
290 wwn_t rport_wwns[], int *nrports); 294 struct bfa_rport_qualifier_s rport[], int *nrports);
291
292wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, 295wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
293 int index, int nrports, bfa_boolean_t bwwn); 296 int index, int nrports, bfa_boolean_t bwwn);
294 297
@@ -324,12 +327,17 @@ void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
324void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port); 327void bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
325void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port); 328void bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
326void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port); 329void bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
330void bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port);
327struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid( 331struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
328 struct bfa_fcs_lport_s *port, u32 pid); 332 struct bfa_fcs_lport_s *port, u32 pid);
333struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_old_pid(
334 struct bfa_fcs_lport_s *port, u32 pid);
329struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn( 335struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
330 struct bfa_fcs_lport_s *port, wwn_t pwwn); 336 struct bfa_fcs_lport_s *port, wwn_t pwwn);
331struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn( 337struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
332 struct bfa_fcs_lport_s *port, wwn_t nwwn); 338 struct bfa_fcs_lport_s *port, wwn_t nwwn);
339struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_qualifier(
340 struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 pid);
333void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, 341void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
334 struct bfa_fcs_rport_s *rport); 342 struct bfa_fcs_rport_s *rport);
335void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port, 343void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
@@ -338,6 +346,8 @@ void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
338void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport); 346void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
339void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport); 347void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
340void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port); 348void bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
349void bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg,
350 struct bfa_fcxp_s *fcxp_alloced);
341void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport); 351void bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
342void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport); 352void bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
343void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport); 353void bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
@@ -382,6 +392,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
382void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); 392void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
383void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); 393void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
384void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport); 394void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
395void bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport);
385void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport); 396void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
386 397
387#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */ 398#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
@@ -419,6 +430,7 @@ struct bfa_fcs_rport_s {
419 struct bfa_fcs_s *fcs; /* fcs instance */ 430 struct bfa_fcs_s *fcs; /* fcs instance */
420 struct bfad_rport_s *rp_drv; /* driver peer instance */ 431 struct bfad_rport_s *rp_drv; /* driver peer instance */
421 u32 pid; /* port ID of rport */ 432 u32 pid; /* port ID of rport */
433 u32 old_pid; /* PID before rport goes offline */
422 u16 maxfrsize; /* maximum frame size */ 434 u16 maxfrsize; /* maximum frame size */
423 __be16 reply_oxid; /* OX_ID of inbound requests */ 435 __be16 reply_oxid; /* OX_ID of inbound requests */
424 enum fc_cos fc_cos; /* FC classes of service supp */ 436 enum fc_cos fc_cos; /* FC classes of service supp */
@@ -459,7 +471,7 @@ struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
459struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( 471struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
460 struct bfa_fcs_lport_s *port, wwn_t rnwwn); 472 struct bfa_fcs_lport_s *port, wwn_t rnwwn);
461void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); 473void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
462 474void bfa_fcs_rport_set_max_logins(u32 max_logins);
463void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, 475void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
464 struct fchs_s *fchs, u16 len); 476 struct fchs_s *fchs, u16 len);
465void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); 477void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
@@ -505,12 +517,13 @@ struct bfa_fcs_itnim_s {
505 struct bfa_fcxp_s *fcxp; /* FCXP in use */ 517 struct bfa_fcxp_s *fcxp; /* FCXP in use */
506 struct bfa_itnim_stats_s stats; /* itn statistics */ 518 struct bfa_itnim_stats_s stats; /* itn statistics */
507}; 519};
508#define bfa_fcs_fcxp_alloc(__fcs) \ 520#define bfa_fcs_fcxp_alloc(__fcs, __req) \
509 bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL) 521 bfa_fcxp_req_rsp_alloc(NULL, (__fcs)->bfa, 0, 0, \
510 522 NULL, NULL, NULL, NULL, __req)
511#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \ 523#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
512 bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \ 524 __alloc_cbarg, __req) \
513 NULL, 0, 0, NULL, NULL, NULL, NULL) 525 bfa_fcxp_req_rsp_alloc_wait(__bfa, __wqe, __alloc_cbfn, \
526 __alloc_cbarg, NULL, 0, 0, NULL, NULL, NULL, NULL, __req)
514 527
515static inline struct bfad_port_s * 528static inline struct bfad_port_s *
516bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim) 529bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
@@ -592,7 +605,7 @@ bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
592struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport); 605struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
593void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim); 606void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
594void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim); 607void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
595void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim); 608void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim);
596bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim); 609bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
597void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim); 610void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
598void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, 611void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
@@ -676,6 +689,7 @@ struct bfa_fcs_s {
676 struct bfa_fcs_stats_s stats; /* FCS statistics */ 689 struct bfa_fcs_stats_s stats; /* FCS statistics */
677 struct bfa_wc_s wc; /* waiting counter */ 690 struct bfa_wc_s wc; /* waiting counter */
678 int fcs_aen_seq; 691 int fcs_aen_seq;
692 u32 num_rport_logins;
679}; 693};
680 694
681/* 695/*
@@ -702,6 +716,9 @@ enum bfa_fcs_fabric_event {
702 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */ 716 BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */
703 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */ 717 BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */
704 BFA_FCS_FABRIC_SM_START = 16, /* from driver */ 718 BFA_FCS_FABRIC_SM_START = 16, /* from driver */
719 BFA_FCS_FABRIC_SM_STOP = 17, /* Stop from driver */
720 BFA_FCS_FABRIC_SM_STOPCOMP = 18, /* Stop completion */
721 BFA_FCS_FABRIC_SM_LOGOCOMP = 19, /* FLOGO completion */
705}; 722};
706 723
707/* 724/*
@@ -727,6 +744,26 @@ enum rport_event {
727 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ 744 RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */
728 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ 745 RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */
729 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ 746 RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */
747 RPSM_EVENT_FC4_FCS_ONLINE = 19, /*!< FC-4 FCS online complete */
748};
749
750/*
751 * fcs_itnim_sm FCS itnim state machine events
752 */
753enum bfa_fcs_itnim_event {
754 BFA_FCS_ITNIM_SM_FCS_ONLINE = 1, /* rport online event */
755 BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
756 BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
757 BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
758 BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
759 BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
 760 BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA offline callback */
 761 BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA online callback */
762 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
763 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
 764 BFA_FCS_ITNIM_SM_PRLO = 11, /* PRLO received from rport */
765 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
766 BFA_FCS_ITNIM_SM_HAL_ONLINE = 13, /*!< bfa rport online event */
730}; 767};
731 768
732/* 769/*
@@ -741,6 +778,7 @@ void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
741void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, 778void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
742 struct bfa_fcs_driver_info_s *driver_info); 779 struct bfa_fcs_driver_info_s *driver_info);
743void bfa_fcs_exit(struct bfa_fcs_s *fcs); 780void bfa_fcs_exit(struct bfa_fcs_s *fcs);
781void bfa_fcs_stop(struct bfa_fcs_s *fcs);
744 782
745/* 783/*
746 * bfa fcs vf public functions 784 * bfa fcs vf public functions
@@ -766,11 +804,13 @@ void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
766void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, 804void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
767 struct fchs_s *fchs, u16 len); 805 struct fchs_s *fchs, u16 len);
768void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); 806void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
807void bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
769void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, 808void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
770 wwn_t fabric_name); 809 wwn_t fabric_name);
771u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); 810u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
772void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); 811void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
773void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); 812void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
813void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
774void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, 814void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
775 enum bfa_fcs_fabric_event event); 815 enum bfa_fcs_fabric_event event);
776void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, 816void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 9272840a240..6dc7926a3ed 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -40,25 +40,6 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
40static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, 40static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
41 enum bfa_itnim_aen_event event); 41 enum bfa_itnim_aen_event event);
42 42
43/*
44 * fcs_itnim_sm FCS itnim state machine events
45 */
46
47enum bfa_fcs_itnim_event {
48 BFA_FCS_ITNIM_SM_ONLINE = 1, /* rport online event */
49 BFA_FCS_ITNIM_SM_OFFLINE = 2, /* rport offline */
50 BFA_FCS_ITNIM_SM_FRMSENT = 3, /* prli frame is sent */
51 BFA_FCS_ITNIM_SM_RSP_OK = 4, /* good response */
52 BFA_FCS_ITNIM_SM_RSP_ERROR = 5, /* error response */
53 BFA_FCS_ITNIM_SM_TIMEOUT = 6, /* delay timeout */
54 BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /* BFA online callback */
55 BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /* BFA offline callback */
56 BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
57 BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
58 BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
59 BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
60};
61
62static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, 43static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
63 enum bfa_fcs_itnim_event event); 44 enum bfa_fcs_itnim_event event);
64static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, 45static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
@@ -69,6 +50,8 @@ static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
69 enum bfa_fcs_itnim_event event); 50 enum bfa_fcs_itnim_event event);
70static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, 51static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
71 enum bfa_fcs_itnim_event event); 52 enum bfa_fcs_itnim_event event);
53static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
54 enum bfa_fcs_itnim_event event);
72static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, 55static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
73 enum bfa_fcs_itnim_event event); 56 enum bfa_fcs_itnim_event event);
74static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, 57static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
@@ -99,7 +82,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
99 bfa_trc(itnim->fcs, event); 82 bfa_trc(itnim->fcs, event);
100 83
101 switch (event) { 84 switch (event) {
102 case BFA_FCS_ITNIM_SM_ONLINE: 85 case BFA_FCS_ITNIM_SM_FCS_ONLINE:
103 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); 86 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
104 itnim->prli_retries = 0; 87 itnim->prli_retries = 0;
105 bfa_fcs_itnim_send_prli(itnim, NULL); 88 bfa_fcs_itnim_send_prli(itnim, NULL);
@@ -138,6 +121,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
138 case BFA_FCS_ITNIM_SM_INITIATOR: 121 case BFA_FCS_ITNIM_SM_INITIATOR:
139 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); 122 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
140 bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); 123 bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
124 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
141 break; 125 break;
142 126
143 case BFA_FCS_ITNIM_SM_OFFLINE: 127 case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -166,12 +150,13 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
166 150
167 switch (event) { 151 switch (event) {
168 case BFA_FCS_ITNIM_SM_RSP_OK: 152 case BFA_FCS_ITNIM_SM_RSP_OK:
169 if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) { 153 if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
170 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); 154 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
171 } else { 155 else
172 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); 156 bfa_sm_set_state(itnim,
173 bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); 157 bfa_fcs_itnim_sm_hal_rport_online);
174 } 158
159 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
175 break; 160 break;
176 161
177 case BFA_FCS_ITNIM_SM_RSP_ERROR: 162 case BFA_FCS_ITNIM_SM_RSP_ERROR:
@@ -194,6 +179,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
194 case BFA_FCS_ITNIM_SM_INITIATOR: 179 case BFA_FCS_ITNIM_SM_INITIATOR:
195 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); 180 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
196 bfa_fcxp_discard(itnim->fcxp); 181 bfa_fcxp_discard(itnim->fcxp);
182 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
197 break; 183 break;
198 184
199 case BFA_FCS_ITNIM_SM_DELETE: 185 case BFA_FCS_ITNIM_SM_DELETE:
@@ -208,6 +194,44 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
208} 194}
209 195
210static void 196static void
197bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim,
198 enum bfa_fcs_itnim_event event)
199{
200 bfa_trc(itnim->fcs, itnim->rport->pwwn);
201 bfa_trc(itnim->fcs, event);
202
203 switch (event) {
204 case BFA_FCS_ITNIM_SM_HAL_ONLINE:
205 if (!itnim->bfa_itnim)
206 itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa,
207 itnim->rport->bfa_rport, itnim);
208
209 if (itnim->bfa_itnim) {
210 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
211 bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
212 } else {
213 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
214 bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE);
215 }
216
217 break;
218
219 case BFA_FCS_ITNIM_SM_OFFLINE:
220 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
221 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
222 break;
223
224 case BFA_FCS_ITNIM_SM_DELETE:
225 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
226 bfa_fcs_itnim_free(itnim);
227 break;
228
229 default:
230 bfa_sm_fault(itnim->fcs, event);
231 }
232}
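
The HAL_ONLINE arm above replaces the old eager bfa_itnim_create() at itnim allocation time: the BFA-side object is created only on the first online, the NULL check keeps a repeated online from leaking a second one, and a failed create rolls the rport back via RPSM_EVENT_DELETE. The shape of that create-on-first-use guard, reduced to a sketch with invented res_*/obj_* names:

#include <stdlib.h>

struct res { int id; };

static struct res *res_create(void)	/* NULL when the pool is exhausted */
{
	return calloc(1, sizeof(struct res));
}

struct obj { struct res *res; };

static int obj_online(struct obj *o)
{
	if (!o->res)			/* first online since (re)creation */
		o->res = res_create();

	if (!o->res)
		return -1;		/* mirrors the RPSM_EVENT_DELETE rollback */
	return 0;			/* proceed to the hcb_online state */
}

int main(void)
{
	struct obj o = { 0 };

	return obj_online(&o);
}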
233
234static void
211bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, 235bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
212 enum bfa_fcs_itnim_event event) 236 enum bfa_fcs_itnim_event event)
213{ 237{
@@ -238,6 +262,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
238 case BFA_FCS_ITNIM_SM_INITIATOR: 262 case BFA_FCS_ITNIM_SM_INITIATOR:
239 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); 263 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
240 bfa_timer_stop(&itnim->timer); 264 bfa_timer_stop(&itnim->timer);
265 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
241 break; 266 break;
242 267
243 case BFA_FCS_ITNIM_SM_DELETE: 268 case BFA_FCS_ITNIM_SM_DELETE:
@@ -275,9 +300,8 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
275 break; 300 break;
276 301
277 case BFA_FCS_ITNIM_SM_OFFLINE: 302 case BFA_FCS_ITNIM_SM_OFFLINE:
278 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 303 bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
279 bfa_itnim_offline(itnim->bfa_itnim); 304 bfa_itnim_offline(itnim->bfa_itnim);
280 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
281 break; 305 break;
282 306
283 case BFA_FCS_ITNIM_SM_DELETE: 307 case BFA_FCS_ITNIM_SM_DELETE:
@@ -372,8 +396,14 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
372 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); 396 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
373 break; 397 break;
374 398
399 /*
 400 * fcs_online is expected here for well-known initiator ports
401 */
402 case BFA_FCS_ITNIM_SM_FCS_ONLINE:
403 bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
404 break;
405
375 case BFA_FCS_ITNIM_SM_RSP_ERROR: 406 case BFA_FCS_ITNIM_SM_RSP_ERROR:
376 case BFA_FCS_ITNIM_SM_ONLINE:
377 case BFA_FCS_ITNIM_SM_INITIATOR: 407 case BFA_FCS_ITNIM_SM_INITIATOR:
378 break; 408 break;
379 409
@@ -426,11 +456,12 @@ bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
426 456
427 bfa_trc(itnim->fcs, itnim->rport->pwwn); 457 bfa_trc(itnim->fcs, itnim->rport->pwwn);
428 458
429 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 459 fcxp = fcxp_alloced ? fcxp_alloced :
460 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
430 if (!fcxp) { 461 if (!fcxp) {
431 itnim->stats.fcxp_alloc_wait++; 462 itnim->stats.fcxp_alloc_wait++;
432 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, 463 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
433 bfa_fcs_itnim_send_prli, itnim); 464 bfa_fcs_itnim_send_prli, itnim, BFA_TRUE);
434 return; 465 return;
435 } 466 }
436 itnim->fcxp = fcxp; 467 itnim->fcxp = fcxp;
@@ -483,7 +514,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
483 if (prli_resp->parampage.servparams.initiator) { 514 if (prli_resp->parampage.servparams.initiator) {
484 bfa_trc(itnim->fcs, prli_resp->parampage.type); 515 bfa_trc(itnim->fcs, prli_resp->parampage.type);
485 itnim->rport->scsi_function = 516 itnim->rport->scsi_function =
486 BFA_RPORT_INITIATOR; 517 BFA_RPORT_INITIATOR;
487 itnim->stats.prli_rsp_acc++; 518 itnim->stats.prli_rsp_acc++;
488 itnim->stats.initiator++; 519 itnim->stats.initiator++;
489 bfa_sm_send_event(itnim, 520 bfa_sm_send_event(itnim,
@@ -531,7 +562,11 @@ bfa_fcs_itnim_timeout(void *arg)
531static void 562static void
532bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) 563bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
533{ 564{
534 bfa_itnim_delete(itnim->bfa_itnim); 565 if (itnim->bfa_itnim) {
566 bfa_itnim_delete(itnim->bfa_itnim);
567 itnim->bfa_itnim = NULL;
568 }
569
535 bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv); 570 bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv);
536} 571}
537 572
@@ -552,7 +587,6 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
552 struct bfa_fcs_lport_s *port = rport->port; 587 struct bfa_fcs_lport_s *port = rport->port;
553 struct bfa_fcs_itnim_s *itnim; 588 struct bfa_fcs_itnim_s *itnim;
554 struct bfad_itnim_s *itnim_drv; 589 struct bfad_itnim_s *itnim_drv;
555 struct bfa_itnim_s *bfa_itnim;
556 590
557 /* 591 /*
558 * call bfad to allocate the itnim 592 * call bfad to allocate the itnim
@@ -570,20 +604,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
570 itnim->fcs = rport->fcs; 604 itnim->fcs = rport->fcs;
571 itnim->itnim_drv = itnim_drv; 605 itnim->itnim_drv = itnim_drv;
572 606
573 /* 607 itnim->bfa_itnim = NULL;
574 * call BFA to create the itnim
575 */
576 bfa_itnim =
577 bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
578
579 if (bfa_itnim == NULL) {
580 bfa_trc(port->fcs, rport->pwwn);
581 bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv);
582 WARN_ON(1);
583 return NULL;
584 }
585
586 itnim->bfa_itnim = bfa_itnim;
587 itnim->seq_rec = BFA_FALSE; 608 itnim->seq_rec = BFA_FALSE;
588 itnim->rec_support = BFA_FALSE; 609 itnim->rec_support = BFA_FALSE;
589 itnim->conf_comp = BFA_FALSE; 610 itnim->conf_comp = BFA_FALSE;
@@ -613,20 +634,12 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim)
613 * Notification from rport that PLOGI is complete to initiate FC-4 session. 634 * Notification from rport that PLOGI is complete to initiate FC-4 session.
614 */ 635 */
615void 636void
616bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim) 637bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
617{ 638{
618 itnim->stats.onlines++; 639 itnim->stats.onlines++;
619 640
620 if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) { 641 if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid))
621 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE); 642 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
622 } else {
623 /*
624 * For well known addresses, we set the itnim to initiator
625 * state
626 */
627 itnim->stats.initiator++;
628 bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
629 }
630} 643}
631 644
632/* 645/*
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index bcc4966e8ba..3b75f6fb2de 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -131,6 +131,8 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
131 /* If vport - send completion call back */ 131 /* If vport - send completion call back */
132 if (port->vport) 132 if (port->vport)
133 bfa_fcs_vport_stop_comp(port->vport); 133 bfa_fcs_vport_stop_comp(port->vport);
134 else
135 bfa_wc_down(&(port->fabric->stop_wc));
134 break; 136 break;
135 137
136 case BFA_FCS_PORT_SM_OFFLINE: 138 case BFA_FCS_PORT_SM_OFFLINE:
@@ -166,6 +168,8 @@ bfa_fcs_lport_sm_online(
166 /* If vport - send completion call back */ 168 /* If vport - send completion call back */
167 if (port->vport) 169 if (port->vport)
168 bfa_fcs_vport_stop_comp(port->vport); 170 bfa_fcs_vport_stop_comp(port->vport);
171 else
172 bfa_wc_down(&(port->fabric->stop_wc));
169 } else { 173 } else {
170 bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); 174 bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
171 list_for_each_safe(qe, qen, &port->rport_q) { 175 list_for_each_safe(qe, qen, &port->rport_q) {
@@ -222,6 +226,8 @@ bfa_fcs_lport_sm_offline(
222 /* If vport - send completion call back */ 226 /* If vport - send completion call back */
223 if (port->vport) 227 if (port->vport)
224 bfa_fcs_vport_stop_comp(port->vport); 228 bfa_fcs_vport_stop_comp(port->vport);
229 else
230 bfa_wc_down(&(port->fabric->stop_wc));
225 } else { 231 } else {
226 bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping); 232 bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
227 list_for_each_safe(qe, qen, &port->rport_q) { 233 list_for_each_safe(qe, qen, &port->rport_q) {
@@ -267,6 +273,8 @@ bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
267 /* If vport - send completion call back */ 273 /* If vport - send completion call back */
268 if (port->vport) 274 if (port->vport)
269 bfa_fcs_vport_stop_comp(port->vport); 275 bfa_fcs_vport_stop_comp(port->vport);
276 else
277 bfa_wc_down(&(port->fabric->stop_wc));
270 } 278 }
271 break; 279 break;
272 280
@@ -340,7 +348,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
340 bfa_trc(port->fcs, rx_fchs->d_id); 348 bfa_trc(port->fcs, rx_fchs->d_id);
341 bfa_trc(port->fcs, rx_fchs->s_id); 349 bfa_trc(port->fcs, rx_fchs->s_id);
342 350
343 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 351 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
344 if (!fcxp) 352 if (!fcxp)
345 return; 353 return;
346 354
@@ -370,7 +378,7 @@ bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
370 bfa_trc(port->fcs, rx_fchs->d_id); 378 bfa_trc(port->fcs, rx_fchs->d_id);
371 bfa_trc(port->fcs, rx_fchs->s_id); 379 bfa_trc(port->fcs, rx_fchs->s_id);
372 380
373 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 381 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
374 if (!fcxp) 382 if (!fcxp)
375 return; 383 return;
376 384
@@ -507,7 +515,7 @@ bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
507 bfa_trc(port->fcs, rx_fchs->s_id); 515 bfa_trc(port->fcs, rx_fchs->s_id);
508 bfa_trc(port->fcs, rx_fchs->d_id); 516 bfa_trc(port->fcs, rx_fchs->d_id);
509 517
510 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 518 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
511 if (!fcxp) 519 if (!fcxp)
512 return; 520 return;
513 521
@@ -552,7 +560,7 @@ bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
552 bfa_trc(port->fcs, rx_fchs->d_id); 560 bfa_trc(port->fcs, rx_fchs->d_id);
553 bfa_trc(port->fcs, rx_len); 561 bfa_trc(port->fcs, rx_len);
554 562
555 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 563 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
556 if (!fcxp) 564 if (!fcxp)
557 return; 565 return;
558 566
@@ -684,7 +692,7 @@ bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
684 bfa_trc(port->fcs, rx_fchs->d_id); 692 bfa_trc(port->fcs, rx_fchs->d_id);
685 bfa_trc(port->fcs, rx_fchs->s_id); 693 bfa_trc(port->fcs, rx_fchs->s_id);
686 694
687 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 695 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
688 if (!fcxp) 696 if (!fcxp)
689 return; 697 return;
690 698
@@ -854,6 +862,25 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
854} 862}
855 863
856/* 864/*
 865 * OLD_PID based lookup for an R-Port in the Port R-Port Queue
866 */
867struct bfa_fcs_rport_s *
868bfa_fcs_lport_get_rport_by_old_pid(struct bfa_fcs_lport_s *port, u32 pid)
869{
870 struct bfa_fcs_rport_s *rport;
871 struct list_head *qe;
872
873 list_for_each(qe, &port->rport_q) {
874 rport = (struct bfa_fcs_rport_s *) qe;
875 if (rport->old_pid == pid)
876 return rport;
877 }
878
879 bfa_trc(port->fcs, pid);
880 return NULL;
881}
882
883/*
857 * PWWN based Lookup for a R-Port in the Port R-Port Queue 884 * PWWN based Lookup for a R-Port in the Port R-Port Queue
858 */ 885 */
859struct bfa_fcs_rport_s * 886struct bfa_fcs_rport_s *
@@ -892,6 +919,26 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
892} 919}
893 920
894/* 921/*
 922 * PWWN & PID based lookup for an R-Port in the Port R-Port Queue
923 */
924struct bfa_fcs_rport_s *
925bfa_fcs_lport_get_rport_by_qualifier(struct bfa_fcs_lport_s *port,
926 wwn_t pwwn, u32 pid)
927{
928 struct bfa_fcs_rport_s *rport;
929 struct list_head *qe;
930
931 list_for_each(qe, &port->rport_q) {
932 rport = (struct bfa_fcs_rport_s *) qe;
933 if (wwn_is_equal(rport->pwwn, pwwn) && rport->pid == pid)
934 return rport;
935 }
936
937 bfa_trc(port->fcs, pwwn);
938 return NULL;
939}
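
With old_pid tracked, a port WWN alone no longer pins down one rport instance while an address change is in flight, so the new lookup matches on the (PWWN, PID) pair. The same match over a plain array, with simplified types for illustration:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct rport { uint64_t pwwn; uint32_t pid; };

static struct rport *find_by_qualifier(struct rport *tbl, size_t n,
				       uint64_t pwwn, uint32_t pid)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].pwwn == pwwn && tbl[i].pid == pid)
			return &tbl[i];
	return NULL;	/* caller falls back to a WWN- or PID-only lookup */
}

int main(void)
{
	/* same WWN seen at two addresses, e.g. mid address change */
	struct rport tbl[] = {
		{ 0x5001438001234567ULL, 0x010200 },
		{ 0x5001438001234567ULL, 0x010300 },
	};
	struct rport *r = find_by_qualifier(tbl, 2, tbl[1].pwwn, 0x010300);

	printf("matched pid %06x\n", r ? r->pid : 0);
	return 0;
}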
940
941/*
895 * Called by rport module when new rports are discovered. 942 * Called by rport module when new rports are discovered.
896 */ 943 */
897void 944void
@@ -939,6 +986,16 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
939} 986}
940 987
941/* 988/*
989 * Called by fabric for base port and by vport for virtual ports
 990 * when the target mode driver is unloaded.
991 */
992void
993bfa_fcs_lport_stop(struct bfa_fcs_lport_s *port)
994{
995 bfa_sm_send_event(port, BFA_FCS_PORT_SM_STOP);
996}
997
998/*
942 * Called by fabric to delete base lport and associated resources. 999 * Called by fabric to delete base lport and associated resources.
943 * 1000 *
944 * Called by vport to delete lport and associated resources. Should call 1001 * Called by vport to delete lport and associated resources. Should call
@@ -1657,10 +1714,11 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1657 1714
1658 bfa_trc(port->fcs, port->port_cfg.pwwn); 1715 bfa_trc(port->fcs, port->port_cfg.pwwn);
1659 1716
1660 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1717 fcxp = fcxp_alloced ? fcxp_alloced :
1718 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
1661 if (!fcxp) { 1719 if (!fcxp) {
1662 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, 1720 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1663 bfa_fcs_lport_fdmi_send_rhba, fdmi); 1721 bfa_fcs_lport_fdmi_send_rhba, fdmi, BFA_TRUE);
1664 return; 1722 return;
1665 } 1723 }
1666 fdmi->fcxp = fcxp; 1724 fdmi->fcxp = fcxp;
@@ -1931,10 +1989,11 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1931 1989
1932 bfa_trc(port->fcs, port->port_cfg.pwwn); 1990 bfa_trc(port->fcs, port->port_cfg.pwwn);
1933 1991
1934 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1992 fcxp = fcxp_alloced ? fcxp_alloced :
1993 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
1935 if (!fcxp) { 1994 if (!fcxp) {
1936 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, 1995 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
1937 bfa_fcs_lport_fdmi_send_rprt, fdmi); 1996 bfa_fcs_lport_fdmi_send_rprt, fdmi, BFA_TRUE);
1938 return; 1997 return;
1939 } 1998 }
1940 fdmi->fcxp = fcxp; 1999 fdmi->fcxp = fcxp;
@@ -2146,10 +2205,11 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2146 2205
2147 bfa_trc(port->fcs, port->port_cfg.pwwn); 2206 bfa_trc(port->fcs, port->port_cfg.pwwn);
2148 2207
2149 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 2208 fcxp = fcxp_alloced ? fcxp_alloced :
2209 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
2150 if (!fcxp) { 2210 if (!fcxp) {
2151 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe, 2211 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
2152 bfa_fcs_lport_fdmi_send_rpa, fdmi); 2212 bfa_fcs_lport_fdmi_send_rpa, fdmi, BFA_TRUE);
2153 return; 2213 return;
2154 } 2214 }
2155 fdmi->fcxp = fcxp; 2215 fdmi->fcxp = fcxp;
@@ -2736,10 +2796,11 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2736 2796
2737 bfa_trc(port->fcs, port->pid); 2797 bfa_trc(port->fcs, port->pid);
2738 2798
2739 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 2799 fcxp = fcxp_alloced ? fcxp_alloced :
2800 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
2740 if (!fcxp) { 2801 if (!fcxp) {
2741 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, 2802 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
2742 bfa_fcs_lport_ms_send_gmal, ms); 2803 bfa_fcs_lport_ms_send_gmal, ms, BFA_TRUE);
2743 return; 2804 return;
2744 } 2805 }
2745 ms->fcxp = fcxp; 2806 ms->fcxp = fcxp;
@@ -2936,10 +2997,11 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
2936 2997
2937 bfa_trc(port->fcs, port->pid); 2998 bfa_trc(port->fcs, port->pid);
2938 2999
2939 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 3000 fcxp = fcxp_alloced ? fcxp_alloced :
3001 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
2940 if (!fcxp) { 3002 if (!fcxp) {
2941 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, 3003 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
2942 bfa_fcs_lport_ms_send_gfn, ms); 3004 bfa_fcs_lport_ms_send_gfn, ms, BFA_TRUE);
2943 return; 3005 return;
2944 } 3006 }
2945 ms->fcxp = fcxp; 3007 ms->fcxp = fcxp;
@@ -3012,11 +3074,12 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3012 3074
3013 bfa_trc(port->fcs, port->pid); 3075 bfa_trc(port->fcs, port->pid);
3014 3076
3015 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 3077 fcxp = fcxp_alloced ? fcxp_alloced :
3078 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
3016 if (!fcxp) { 3079 if (!fcxp) {
3017 port->stats.ms_plogi_alloc_wait++; 3080 port->stats.ms_plogi_alloc_wait++;
3018 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, 3081 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
3019 bfa_fcs_lport_ms_send_plogi, ms); 3082 bfa_fcs_lport_ms_send_plogi, ms, BFA_TRUE);
3020 return; 3083 return;
3021 } 3084 }
3022 ms->fcxp = fcxp; 3085 ms->fcxp = fcxp;
@@ -3166,6 +3229,10 @@ static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
3166 struct bfa_fcxp_s *fcxp_alloced); 3229 struct bfa_fcxp_s *fcxp_alloced);
3167static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, 3230static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
3168 struct bfa_fcxp_s *fcxp_alloced); 3231 struct bfa_fcxp_s *fcxp_alloced);
3232static void bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg,
3233 struct bfa_fcxp_s *fcxp_alloced);
3234static void bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg,
3235 struct bfa_fcxp_s *fcxp_alloced);
3169static void bfa_fcs_lport_ns_timeout(void *arg); 3236static void bfa_fcs_lport_ns_timeout(void *arg);
3170static void bfa_fcs_lport_ns_plogi_response(void *fcsarg, 3237static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
3171 struct bfa_fcxp_s *fcxp, 3238 struct bfa_fcxp_s *fcxp,
@@ -3202,6 +3269,20 @@ static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
3202 u32 rsp_len, 3269 u32 rsp_len,
3203 u32 resid_len, 3270 u32 resid_len,
3204 struct fchs_s *rsp_fchs); 3271 struct fchs_s *rsp_fchs);
3272static void bfa_fcs_lport_ns_rnn_id_response(void *fcsarg,
3273 struct bfa_fcxp_s *fcxp,
3274 void *cbarg,
3275 bfa_status_t req_status,
3276 u32 rsp_len,
3277 u32 resid_len,
3278 struct fchs_s *rsp_fchs);
3279static void bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg,
3280 struct bfa_fcxp_s *fcxp,
3281 void *cbarg,
3282 bfa_status_t req_status,
3283 u32 rsp_len,
3284 u32 resid_len,
3285 struct fchs_s *rsp_fchs);
3205static void bfa_fcs_lport_ns_process_gidft_pids( 3286static void bfa_fcs_lport_ns_process_gidft_pids(
3206 struct bfa_fcs_lport_s *port, 3287 struct bfa_fcs_lport_s *port,
3207 u32 *pid_buf, u32 n_pids); 3288 u32 *pid_buf, u32 n_pids);
@@ -3226,6 +3307,8 @@ enum vport_ns_event {
3226 NSSM_EVENT_RFTID_SENT = 9, 3307 NSSM_EVENT_RFTID_SENT = 9,
3227 NSSM_EVENT_RFFID_SENT = 10, 3308 NSSM_EVENT_RFFID_SENT = 10,
3228 NSSM_EVENT_GIDFT_SENT = 11, 3309 NSSM_EVENT_GIDFT_SENT = 11,
3310 NSSM_EVENT_RNNID_SENT = 12,
3311 NSSM_EVENT_RSNN_NN_SENT = 13,
3229}; 3312};
3230 3313
3231static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns, 3314static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
@@ -3266,6 +3349,21 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
3266 enum vport_ns_event event); 3349 enum vport_ns_event event);
3267static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, 3350static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
3268 enum vport_ns_event event); 3351 enum vport_ns_event event);
3352static void bfa_fcs_lport_ns_sm_sending_rnn_id(
3353 struct bfa_fcs_lport_ns_s *ns,
3354 enum vport_ns_event event);
3355static void bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
3356 enum vport_ns_event event);
3357static void bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
3358 enum vport_ns_event event);
3359static void bfa_fcs_lport_ns_sm_sending_rsnn_nn(
3360 struct bfa_fcs_lport_ns_s *ns,
3361 enum vport_ns_event event);
3362static void bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
3363 enum vport_ns_event event);
3364static void bfa_fcs_lport_ns_sm_rsnn_nn_retry(
3365 struct bfa_fcs_lport_ns_s *ns,
3366 enum vport_ns_event event);
3269/* 3367/*
3270 * Start in offline state - awaiting linkup 3368 * Start in offline state - awaiting linkup
3271 */ 3369 */
@@ -3333,8 +3431,9 @@ bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
3333 break; 3431 break;
3334 3432
3335 case NSSM_EVENT_RSP_OK: 3433 case NSSM_EVENT_RSP_OK:
3336 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id); 3434 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
3337 bfa_fcs_lport_ns_send_rspn_id(ns, NULL); 3435 ns->num_rnnid_retries = 0;
3436 bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
3338 break; 3437 break;
3339 3438
3340 case NSSM_EVENT_PORT_OFFLINE: 3439 case NSSM_EVENT_PORT_OFFLINE:
@@ -3374,6 +3473,176 @@ bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
3374} 3473}
3375 3474
3376static void 3475static void
3476bfa_fcs_lport_ns_sm_sending_rnn_id(struct bfa_fcs_lport_ns_s *ns,
3477 enum vport_ns_event event)
3478{
3479 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3480 bfa_trc(ns->port->fcs, event);
3481
3482 switch (event) {
3483 case NSSM_EVENT_RNNID_SENT:
3484 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id);
3485 break;
3486
3487 case NSSM_EVENT_PORT_OFFLINE:
3488 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3489 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3490 &ns->fcxp_wqe);
3491 break;
3492 default:
3493 bfa_sm_fault(ns->port->fcs, event);
3494 }
3495}
3496
3497static void
3498bfa_fcs_lport_ns_sm_rnn_id(struct bfa_fcs_lport_ns_s *ns,
3499 enum vport_ns_event event)
3500{
3501 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3502 bfa_trc(ns->port->fcs, event);
3503
3504 switch (event) {
3505 case NSSM_EVENT_RSP_OK:
3506 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
3507 ns->num_rnnid_retries = 0;
3508 ns->num_rsnn_nn_retries = 0;
3509 bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
3510 break;
3511
3512 case NSSM_EVENT_RSP_ERROR:
3513 if (ns->num_rnnid_retries < BFA_FCS_MAX_NS_RETRIES) {
3514 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rnn_id_retry);
3515 ns->port->stats.ns_retries++;
3516 ns->num_rnnid_retries++;
3517 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3518 &ns->timer, bfa_fcs_lport_ns_timeout, ns,
3519 BFA_FCS_RETRY_TIMEOUT);
3520 } else {
3521 bfa_sm_set_state(ns,
3522 bfa_fcs_lport_ns_sm_sending_rspn_id);
3523 bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
3524 }
3525 break;
3526
3527 case NSSM_EVENT_PORT_OFFLINE:
3528 bfa_fcxp_discard(ns->fcxp);
3529 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3530 break;
3531
3532 default:
3533 bfa_sm_fault(ns->port->fcs, event);
3534 }
3535}
3536
3537static void
3538bfa_fcs_lport_ns_sm_rnn_id_retry(struct bfa_fcs_lport_ns_s *ns,
3539 enum vport_ns_event event)
3540{
3541 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3542 bfa_trc(ns->port->fcs, event);
3543
3544 switch (event) {
3545 case NSSM_EVENT_TIMEOUT:
3546 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rnn_id);
3547 bfa_fcs_lport_ns_send_rnn_id(ns, NULL);
3548 break;
3549
3550 case NSSM_EVENT_PORT_OFFLINE:
3551 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3552 bfa_timer_stop(&ns->timer);
3553 break;
3554
3555 default:
3556 bfa_sm_fault(ns->port->fcs, event);
3557 }
3558}
3559
3560static void
3561bfa_fcs_lport_ns_sm_sending_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
3562 enum vport_ns_event event)
3563{
3564 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3565 bfa_trc(ns->port->fcs, event);
3566
3567 switch (event) {
3568 case NSSM_EVENT_RSNN_NN_SENT:
3569 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn);
3570 break;
3571
3572 case NSSM_EVENT_PORT_OFFLINE:
3573 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3574 bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3575 &ns->fcxp_wqe);
3576 break;
3577
3578 default:
3579 bfa_sm_fault(ns->port->fcs, event);
3580 }
3581}
3582
3583static void
3584bfa_fcs_lport_ns_sm_rsnn_nn(struct bfa_fcs_lport_ns_s *ns,
3585 enum vport_ns_event event)
3586{
3587 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3588 bfa_trc(ns->port->fcs, event);
3589
3590 switch (event) {
3591 case NSSM_EVENT_RSP_OK:
3592 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
3593 ns->num_rsnn_nn_retries = 0;
3594 bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
3595 break;
3596
3597 case NSSM_EVENT_RSP_ERROR:
3598 if (ns->num_rsnn_nn_retries < BFA_FCS_MAX_NS_RETRIES) {
3599 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rsnn_nn_retry);
3600 ns->port->stats.ns_retries++;
3601 ns->num_rsnn_nn_retries++;
3602 bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
3603 &ns->timer, bfa_fcs_lport_ns_timeout,
3604 ns, BFA_FCS_RETRY_TIMEOUT);
3605 } else {
3606 bfa_sm_set_state(ns,
3607 bfa_fcs_lport_ns_sm_sending_rspn_id);
3608 bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
3609 }
3610 break;
3611
3612 case NSSM_EVENT_PORT_OFFLINE:
3613 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3614 bfa_fcxp_discard(ns->fcxp);
3615 break;
3616
3617 default:
3618 bfa_sm_fault(ns->port->fcs, event);
3619 }
3620}
3621
3622static void
3623bfa_fcs_lport_ns_sm_rsnn_nn_retry(struct bfa_fcs_lport_ns_s *ns,
3624 enum vport_ns_event event)
3625{
3626 bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
3627 bfa_trc(ns->port->fcs, event);
3628
3629 switch (event) {
3630 case NSSM_EVENT_TIMEOUT:
3631 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rsnn_nn);
3632 bfa_fcs_lport_ns_send_rsnn_nn(ns, NULL);
3633 break;
3634
3635 case NSSM_EVENT_PORT_OFFLINE:
3636 bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
3637 bfa_timer_stop(&ns->timer);
3638 break;
3639
3640 default:
3641 bfa_sm_fault(ns->port->fcs, event);
3642 }
3643}
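
RNN_ID and RSNN_NN share one retry contour: on RSP_ERROR, re-arm a timer and retry up to BFA_FCS_MAX_NS_RETRIES, then abandon the registration and continue to RSPN_ID rather than wedging the name-server login. That policy, boiled down to a sketch (reg_* names invented; the real code arms bfa_timer_start() before each resend):

#define MAX_RETRIES 5	/* stands in for BFA_FCS_MAX_NS_RETRIES */

struct reg {
	int retries;
	void (*send)(struct reg *r);	/* re-issue after the retry timer */
	void (*give_up)(struct reg *r);	/* fall through to the next step */
};

static void reg_on_error(struct reg *r)
{
	if (r->retries < MAX_RETRIES) {
		r->retries++;		/* the driver also bumps ns_retries */
		r->send(r);		/* real code fires this from a timer */
	} else {
		r->give_up(r);		/* e.g. move on to RSPN_ID */
	}
}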
3644
3645static void
3377bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns, 3646bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
3378 enum vport_ns_event event) 3647 enum vport_ns_event event)
3379{ 3648{
@@ -3770,11 +4039,12 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3770 4039
3771 bfa_trc(port->fcs, port->pid); 4040 bfa_trc(port->fcs, port->pid);
3772 4041
3773 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 4042 fcxp = fcxp_alloced ? fcxp_alloced :
4043 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
3774 if (!fcxp) { 4044 if (!fcxp) {
3775 port->stats.ns_plogi_alloc_wait++; 4045 port->stats.ns_plogi_alloc_wait++;
3776 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, 4046 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3777 bfa_fcs_lport_ns_send_plogi, ns); 4047 bfa_fcs_lport_ns_send_plogi, ns, BFA_TRUE);
3778 return; 4048 return;
3779 } 4049 }
3780 ns->fcxp = fcxp; 4050 ns->fcxp = fcxp;
@@ -3853,6 +4123,162 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
3853} 4123}
3854 4124
3855/* 4125/*
4126 * Register node name for port_id
4127 */
4128static void
4129bfa_fcs_lport_ns_send_rnn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4130{
4131 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
4132 struct bfa_fcs_lport_s *port = ns->port;
4133 struct fchs_s fchs;
4134 int len;
4135 struct bfa_fcxp_s *fcxp;
4136
4137 bfa_trc(port->fcs, port->port_cfg.pwwn);
4138
4139 fcxp = fcxp_alloced ? fcxp_alloced :
4140 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
4141 if (!fcxp) {
4142 port->stats.ns_rnnid_alloc_wait++;
4143 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
4144 bfa_fcs_lport_ns_send_rnn_id, ns, BFA_TRUE);
4145 return;
4146 }
4147
4148 ns->fcxp = fcxp;
4149
4150 len = fc_rnnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4151 bfa_fcs_lport_get_fcid(port),
4152 bfa_fcs_lport_get_fcid(port),
4153 bfa_fcs_lport_get_nwwn(port));
4154
4155 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
4156 FC_CLASS_3, len, &fchs,
4157 bfa_fcs_lport_ns_rnn_id_response, (void *)ns,
4158 FC_MAX_PDUSZ, FC_FCCT_TOV);
4159
4160 port->stats.ns_rnnid_sent++;
4161 bfa_sm_send_event(ns, NSSM_EVENT_RNNID_SENT);
4162}
4163
4164static void
4165bfa_fcs_lport_ns_rnn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4166 void *cbarg, bfa_status_t req_status,
4167 u32 rsp_len, u32 resid_len,
4168 struct fchs_s *rsp_fchs)
4169
4170{
4171 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
4172 struct bfa_fcs_lport_s *port = ns->port;
4173 struct ct_hdr_s *cthdr = NULL;
4174
4175 bfa_trc(port->fcs, port->port_cfg.pwwn);
4176
4177 /*
4178 * Sanity Checks
4179 */
4180 if (req_status != BFA_STATUS_OK) {
4181 bfa_trc(port->fcs, req_status);
4182 port->stats.ns_rnnid_rsp_err++;
4183 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
4184 return;
4185 }
4186
4187 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
4188 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
4189
4190 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
4191 port->stats.ns_rnnid_accepts++;
4192 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
4193 return;
4194 }
4195
4196 port->stats.ns_rnnid_rejects++;
4197 bfa_trc(port->fcs, cthdr->reason_code);
4198 bfa_trc(port->fcs, cthdr->exp_code);
4199 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
4200}
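
Both response handlers validate in the same order: transport status first, then the CT header's command/response code, byte-swapped in place from wire (big-endian) order before comparing against CT_RSP_ACCEPT (0x8002 per FC-GS). A freestanding sketch of that check, using ntohs() as a stand-in for be16_to_cpu() and a simplified header layout:

#include <stdint.h>
#include <arpa/inet.h>	/* ntohs()/htons() stand in for be16_to_cpu() */
#include <stdio.h>

#define CT_RSP_ACCEPT 0x8002	/* FC-GS Accept IU response code */

struct ct_hdr {			/* simplified, illustrative layout */
	uint16_t cmd_rsp_code;
	uint8_t reason_code;
	uint8_t exp_code;
};

static int ct_response_ok(struct ct_hdr *cthdr)
{
	/* wire format is big-endian; convert once, in place */
	cthdr->cmd_rsp_code = ntohs(cthdr->cmd_rsp_code);
	return cthdr->cmd_rsp_code == CT_RSP_ACCEPT;
}

int main(void)
{
	struct ct_hdr h = { htons(CT_RSP_ACCEPT), 0, 0 };

	printf("accepted: %d\n", ct_response_ok(&h));	/* accepted: 1 */
	return 0;
}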
4201
4202/*
4203 * Register the symbolic node name for a given node name.
4204 */
4205static void
4206bfa_fcs_lport_ns_send_rsnn_nn(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4207{
4208 struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
4209 struct bfa_fcs_lport_s *port = ns->port;
4210 struct fchs_s fchs;
4211 int len;
4212 struct bfa_fcxp_s *fcxp;
4213 u8 *nsymbl;
4214
4215 bfa_trc(port->fcs, port->port_cfg.pwwn);
4216
4217 fcxp = fcxp_alloced ? fcxp_alloced :
4218 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
4219 if (!fcxp) {
4220 port->stats.ns_rsnn_nn_alloc_wait++;
4221 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
4222 bfa_fcs_lport_ns_send_rsnn_nn, ns, BFA_TRUE);
4223 return;
4224 }
4225 ns->fcxp = fcxp;
4226
4227 nsymbl = (u8 *) &(bfa_fcs_lport_get_nsym_name(
4228 bfa_fcs_get_base_port(port->fcs)));
4229
4230 len = fc_rsnn_nn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4231 bfa_fcs_lport_get_fcid(port),
4232 bfa_fcs_lport_get_nwwn(port), nsymbl);
4233
4234 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
4235 FC_CLASS_3, len, &fchs,
4236 bfa_fcs_lport_ns_rsnn_nn_response, (void *)ns,
4237 FC_MAX_PDUSZ, FC_FCCT_TOV);
4238
4239 port->stats.ns_rsnn_nn_sent++;
4240
4241 bfa_sm_send_event(ns, NSSM_EVENT_RSNN_NN_SENT);
4242}
4243
4244static void
4245bfa_fcs_lport_ns_rsnn_nn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
4246 void *cbarg, bfa_status_t req_status,
4247 u32 rsp_len, u32 resid_len,
4248 struct fchs_s *rsp_fchs)
4249{
4250 struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
4251 struct bfa_fcs_lport_s *port = ns->port;
4252 struct ct_hdr_s *cthdr = NULL;
4253
4254 bfa_trc(port->fcs, port->port_cfg.pwwn);
4255
4256 /*
4257 * Sanity Checks
4258 */
4259 if (req_status != BFA_STATUS_OK) {
4260 bfa_trc(port->fcs, req_status);
4261 port->stats.ns_rsnn_nn_rsp_err++;
4262 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
4263 return;
4264 }
4265
4266 cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
4267 cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
4268
4269 if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
4270 port->stats.ns_rsnn_nn_accepts++;
4271 bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
4272 return;
4273 }
4274
4275 port->stats.ns_rsnn_nn_rejects++;
4276 bfa_trc(port->fcs, cthdr->reason_code);
4277 bfa_trc(port->fcs, cthdr->exp_code);
4278 bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
4279}
4280
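Both new response handlers above follow the driver's usual CT pattern: check the transport status first, then byte-swap the CT command/response code (CT payloads are big-endian on the wire) before comparing it with CT_RSP_ACCEPT. Distilled into a sketch (example_ct_rsp_check() is a hypothetical helper, not part of the patch):

static void
example_ct_rsp_check(struct bfa_fcs_lport_ns_s *ns, struct bfa_fcxp_s *fcxp)
{
	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);

	/* swap from wire byte order before comparing */
	if (be16_to_cpu(cthdr->cmd_rsp_code) == CT_RSP_ACCEPT)
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
	else
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
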
4281/*
3856 * Register the symbolic port name. 4282 * Register the symbolic port name.
3857 */ 4283 */
3858static void 4284static void
@@ -3870,11 +4296,12 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3870 4296
3871 bfa_trc(port->fcs, port->port_cfg.pwwn); 4297 bfa_trc(port->fcs, port->port_cfg.pwwn);
3872 4298
3873 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 4299 fcxp = fcxp_alloced ? fcxp_alloced :
4300 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
3874 if (!fcxp) { 4301 if (!fcxp) {
3875 port->stats.ns_rspnid_alloc_wait++; 4302 port->stats.ns_rspnid_alloc_wait++;
3876 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, 4303 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3877 bfa_fcs_lport_ns_send_rspn_id, ns); 4304 bfa_fcs_lport_ns_send_rspn_id, ns, BFA_TRUE);
3878 return; 4305 return;
3879 } 4306 }
3880 ns->fcxp = fcxp; 4307 ns->fcxp = fcxp;
@@ -3971,11 +4398,12 @@ bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
3971 4398
3972 bfa_trc(port->fcs, port->port_cfg.pwwn); 4399 bfa_trc(port->fcs, port->port_cfg.pwwn);
3973 4400
3974 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 4401 fcxp = fcxp_alloced ? fcxp_alloced :
4402 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
3975 if (!fcxp) { 4403 if (!fcxp) {
3976 port->stats.ns_rftid_alloc_wait++; 4404 port->stats.ns_rftid_alloc_wait++;
3977 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, 4405 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
3978 bfa_fcs_lport_ns_send_rft_id, ns); 4406 bfa_fcs_lport_ns_send_rft_id, ns, BFA_TRUE);
3979 return; 4407 return;
3980 } 4408 }
3981 ns->fcxp = fcxp; 4409 ns->fcxp = fcxp;
@@ -4044,11 +4472,12 @@ bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4044 4472
4045 bfa_trc(port->fcs, port->port_cfg.pwwn); 4473 bfa_trc(port->fcs, port->port_cfg.pwwn);
4046 4474
4047 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 4475 fcxp = fcxp_alloced ? fcxp_alloced :
4476 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
4048 if (!fcxp) { 4477 if (!fcxp) {
4049 port->stats.ns_rffid_alloc_wait++; 4478 port->stats.ns_rffid_alloc_wait++;
4050 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, 4479 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
4051 bfa_fcs_lport_ns_send_rff_id, ns); 4480 bfa_fcs_lport_ns_send_rff_id, ns, BFA_TRUE);
4052 return; 4481 return;
4053 } 4482 }
4054 ns->fcxp = fcxp; 4483 ns->fcxp = fcxp;
@@ -4127,11 +4556,12 @@ bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4127 4556
4128 bfa_trc(port->fcs, port->pid); 4557 bfa_trc(port->fcs, port->pid);
4129 4558
4130 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 4559 fcxp = fcxp_alloced ? fcxp_alloced :
4560 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
4131 if (!fcxp) { 4561 if (!fcxp) {
4132 port->stats.ns_gidft_alloc_wait++; 4562 port->stats.ns_gidft_alloc_wait++;
4133 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, 4563 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
4134 bfa_fcs_lport_ns_send_gid_ft, ns); 4564 bfa_fcs_lport_ns_send_gid_ft, ns, BFA_TRUE);
4135 return; 4565 return;
4136 } 4566 }
4137 ns->fcxp = fcxp; 4567 ns->fcxp = fcxp;
@@ -4261,6 +4691,10 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
4261 struct fcgs_gidft_resp_s *gidft_entry; 4691 struct fcgs_gidft_resp_s *gidft_entry;
4262 struct bfa_fcs_rport_s *rport; 4692 struct bfa_fcs_rport_s *rport;
4263 u32 ii; 4693 u32 ii;
4694 struct bfa_fcs_fabric_s *fabric = port->fabric;
4695 struct bfa_fcs_vport_s *vport;
4696 struct list_head *qe;
4697 u8 found = 0;
4264 4698
4265 for (ii = 0; ii < n_pids; ii++) { 4699 for (ii = 0; ii < n_pids; ii++) {
4266 gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii]; 4700 gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
@@ -4269,6 +4703,29 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
4269 continue; 4703 continue;
4270 4704
4271 /* 4705 /*
4706 * Ignore the PID if it is that of the base port
4707 * (avoids vports discovering the base port as a remote port)
4708 */
4709 if (gidft_entry->pid == fabric->bport.pid)
4710 continue;
4711
4712 /*
4713 * Ignore the PID if it is that of a vport created on the same base
4714 * port (avoids a vport discovering every other vport created on
4715 * the same port as a remote port)
4716 */
4717 list_for_each(qe, &fabric->vport_q) {
4718 vport = (struct bfa_fcs_vport_s *) qe;
4719 if (vport->lport.pid == gidft_entry->pid)
4720 found = 1;
4721 }
4722
4723 if (found) {
4724 found = 0;
4725 continue;
4726 }
4727
4728 /*
4272 * Check if this rport already exists 4729 * Check if this rport already exists
4273 */ 4730 */
4274 rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid); 4731 rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
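
The found-flag loop added above keeps a vport from instantiating the base port or a sibling vport as a remote port when walking GID_FT results. The same filter expressed as a predicate, purely as a sketch (pid_is_local_vport() is a hypothetical helper name):

static bfa_boolean_t
pid_is_local_vport(struct bfa_fcs_fabric_s *fabric, u32 pid)
{
	struct bfa_fcs_vport_s *vport;
	struct list_head *qe;

	if (pid == fabric->bport.pid)		/* the base port itself */
		return BFA_TRUE;

	list_for_each(qe, &fabric->vport_q) {
		vport = (struct bfa_fcs_vport_s *) qe;
		if (vport->lport.pid == pid)	/* a sibling vport */
			return BFA_TRUE;
	}
	return BFA_FALSE;
}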
@@ -4335,7 +4792,8 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
4335 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port); 4792 struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
4336 4793
4337 bfa_trc(port->fcs, port->pid); 4794 bfa_trc(port->fcs, port->pid);
4338 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); 4795 if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_online))
4796 bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
4339} 4797}
4340 4798
4341static void 4799static void
@@ -4355,6 +4813,70 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
4355 } 4813 }
4356} 4814}
4357 4815
4816void
4817bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
4818{
4819 struct bfa_fcs_lport_ns_s *ns = cbarg;
4820 struct bfa_fcs_lport_s *port = ns->port;
4821 struct fchs_s fchs;
4822 struct bfa_fcxp_s *fcxp;
4823 u8 symbl[256];
4824 u8 *psymbl = &symbl[0];
4825 int len;
4826
4827 if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
4828 return;
4829
4830 /* Avoid sending RSPN in the following states. */
4831 if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||
4832 bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) ||
4833 bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi) ||
4834 bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_retry) ||
4835 bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry))
4836 return;
4837
4838 memset(symbl, 0, sizeof(symbl));
4839 bfa_trc(port->fcs, port->port_cfg.pwwn);
4840
4841 fcxp = fcxp_alloced ? fcxp_alloced :
4842 bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
4843 if (!fcxp) {
4844 port->stats.ns_rspnid_alloc_wait++;
4845 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
4846 bfa_fcs_lport_ns_util_send_rspn_id, ns, BFA_FALSE);
4847 return;
4848 }
4849
4850 ns->fcxp = fcxp;
4851
4852 if (port->vport) {
4853 /*
4854 * For Vports, we append the vport's port symbolic name
4855 * to that of the base port.
4856 */
4857 strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
4858 (bfa_fcs_get_base_port(port->fcs))),
4859 strlen((char *)&bfa_fcs_lport_get_psym_name(
4860 bfa_fcs_get_base_port(port->fcs))));
4861
4862 /* Ensure we have a null-terminated string. */
4863 ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
4864 bfa_fcs_get_base_port(port->fcs)))] = 0;
4865
4866 strncat((char *)psymbl,
4867 (char *)&(bfa_fcs_lport_get_psym_name(port)),
4868 strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
4869 }
4870
4871 len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
4872 bfa_fcs_lport_get_fcid(port), 0, psymbl);
4873
4874 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
4875 FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
4876
4877 port->stats.ns_rspnid_sent++;
4878}
4879
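The strncpy()/strncat() sequence above builds a vport's RSPN payload by copying the base port's symbolic port name and appending the vport's own. A bounded alternative, shown only as a sketch (the helper is hypothetical; the buffer size mirrors the function's own symbl[256]):

/* Hypothetical helper: same concatenation done as a single bounded,
 * always NUL-terminated write. */
static void
example_build_vport_symname(struct bfa_fcs_lport_s *port, char *buf, size_t sz)
{
	snprintf(buf, sz, "%s%s",
		 (char *)&bfa_fcs_lport_get_psym_name(
				bfa_fcs_get_base_port(port->fcs)),
		 (char *)&bfa_fcs_lport_get_psym_name(port));
}
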
4358/* 4880/*
4359 * FCS SCN 4881 * FCS SCN
4360 */ 4882 */
@@ -4529,10 +5051,11 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
4529 bfa_trc(port->fcs, port->pid); 5051 bfa_trc(port->fcs, port->pid);
4530 bfa_trc(port->fcs, port->port_cfg.pwwn); 5052 bfa_trc(port->fcs, port->port_cfg.pwwn);
4531 5053
4532 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 5054 fcxp = fcxp_alloced ? fcxp_alloced :
5055 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
4533 if (!fcxp) { 5056 if (!fcxp) {
4534 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe, 5057 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
4535 bfa_fcs_lport_scn_send_scr, scn); 5058 bfa_fcs_lport_scn_send_scr, scn, BFA_TRUE);
4536 return; 5059 return;
4537 } 5060 }
4538 scn->fcxp = fcxp; 5061 scn->fcxp = fcxp;
@@ -4614,7 +5137,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
4614 5137
4615 bfa_trc(port->fcs, rx_fchs->s_id); 5138 bfa_trc(port->fcs, rx_fchs->s_id);
4616 5139
4617 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 5140 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
4618 if (!fcxp) 5141 if (!fcxp)
4619 return; 5142 return;
4620 5143
@@ -4688,14 +5211,33 @@ static void
4688bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid) 5211bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
4689{ 5212{
4690 struct bfa_fcs_rport_s *rport; 5213 struct bfa_fcs_rport_s *rport;
5214 struct bfa_fcs_fabric_s *fabric = port->fabric;
5215 struct bfa_fcs_vport_s *vport;
5216 struct list_head *qe;
4691 5217
4692 bfa_trc(port->fcs, rpid); 5218 bfa_trc(port->fcs, rpid);
4693 5219
4694 /* 5220 /*
5221 * Ignore the PID if it belongs to the base port or to a vport
5222 * created on the same base port. This avoids vports discovering
5223 * the base port, or sibling vports, as remote ports.
5224 */
5225 if (rpid == fabric->bport.pid)
5226 return;
5227
5228 list_for_each(qe, &fabric->vport_q) {
5229 vport = (struct bfa_fcs_vport_s *) qe;
5230 if (vport->lport.pid == rpid)
5231 return;
5232 }
5233 /*
4695 * If this is an unknown device, then it just came online. 5234 * If this is an unknown device, then it just came online.
4696 * Otherwise let rport handle the RSCN event. 5235 * Otherwise let rport handle the RSCN event.
4697 */ 5236 */
4698 rport = bfa_fcs_lport_get_rport_by_pid(port, rpid); 5237 rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
5238 if (!rport)
5239 rport = bfa_fcs_lport_get_rport_by_old_pid(port, rpid);
5240
4699 if (rport == NULL) { 5241 if (rport == NULL) {
4700 /* 5242 /*
4701 * If min cfg mode is enabled, we do not need to 5243 * If min cfg mode is enabled, we do not need to
@@ -4888,15 +5430,15 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
4888} 5430}
4889 5431
4890void 5432void
4891bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port, 5433bfa_fcs_lport_get_rport_quals(struct bfa_fcs_lport_s *port,
4892 wwn_t rport_wwns[], int *nrports) 5434 struct bfa_rport_qualifier_s rports[], int *nrports)
4893{ 5435{
4894 struct list_head *qh, *qe; 5436 struct list_head *qh, *qe;
4895 struct bfa_fcs_rport_s *rport = NULL; 5437 struct bfa_fcs_rport_s *rport = NULL;
4896 int i; 5438 int i;
4897 struct bfa_fcs_s *fcs; 5439 struct bfa_fcs_s *fcs;
4898 5440
4899 if (port == NULL || rport_wwns == NULL || *nrports == 0) 5441 if (port == NULL || rports == NULL || *nrports == 0)
4900 return; 5442 return;
4901 5443
4902 fcs = port->fcs; 5444 fcs = port->fcs;
@@ -4916,7 +5458,13 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
4916 continue; 5458 continue;
4917 } 5459 }
4918 5460
4919 rport_wwns[i] = rport->pwwn; 5461 if (!rport->pwwn && !rport->pid) {
5462 qe = bfa_q_next(qe);
5463 continue;
5464 }
5465
5466 rports[i].pwwn = rport->pwwn;
5467 rports[i].pid = rport->pid;
4920 5468
4921 i++; 5469 i++;
4922 qe = bfa_q_next(qe); 5470 qe = bfa_q_next(qe);
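
bfa_fcs_lport_get_rports() becomes bfa_fcs_lport_get_rport_quals(): instead of bare WWNs it returns {pwwn, pid} qualifier pairs, and rports that have neither a pwwn nor a pid are now skipped. A hypothetical caller of the renamed API (array size is illustrative; it is assumed, as with the old routine, that *nrports is updated to the count actually filled in):

static void
example_dump_rport_quals(struct bfa_fcs_lport_s *port)
{
	struct bfa_rport_qualifier_s quals[32];	/* illustrative size */
	int nrports = 32;
	int i;

	bfa_fcs_lport_get_rport_quals(port, quals, &nrports);
	for (i = 0; i < nrports; i++)
		bfa_trc(port->fcs, quals[i].pid);	/* pwwn also valid */
}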
@@ -5760,6 +6308,16 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
5760{ 6308{
5761 vport->vport_stats.fab_cleanup++; 6309 vport->vport_stats.fab_cleanup++;
5762} 6310}
6311
6312/*
6313 * Stop notification from fabric SM. To be invoked from within FCS.
6314 */
6315void
6316bfa_fcs_vport_fcs_stop(struct bfa_fcs_vport_s *vport)
6317{
6318 bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
6319}
6320
5763/* 6321/*
5764 * delete notification from fabric SM. To be invoked from within FCS. 6322 * delete notification from fabric SM. To be invoked from within FCS.
5765 */ 6323 */
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index fe0463a1db0..cc43b2a58ce 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -30,14 +30,22 @@ static u32
30bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000; 30bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
31 /* In millisecs */ 31 /* In millisecs */
32/* 32/*
33 * bfa_fcs_rport_max_logins is the max count of bfa_fcs_rports,
34 * whereas DEF_CFG_NUM_RPORTS is the max count of bfa_rports
35 */
36static u32 bfa_fcs_rport_max_logins = BFA_FCS_MAX_RPORT_LOGINS;
37
38/*
33 * forward declarations 39 * forward declarations
34 */ 40 */
35static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc( 41static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
36 struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid); 42 struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
37static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport); 43static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
38static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport); 44static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
39static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport); 45static void bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport);
40static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport); 46static void bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport);
47static void bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport);
48static void bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport);
41static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, 49static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
42 struct fc_logi_s *plogi); 50 struct fc_logi_s *plogi);
43static void bfa_fcs_rport_timeout(void *arg); 51static void bfa_fcs_rport_timeout(void *arg);
@@ -76,6 +84,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
76static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, 84static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
77 struct fchs_s *rx_fchs, u16 len); 85 struct fchs_s *rx_fchs, u16 len);
78static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); 86static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
87static void bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport);
79 88
80static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, 89static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
81 enum rport_event event); 90 enum rport_event event);
@@ -87,6 +96,8 @@ static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
87 enum rport_event event); 96 enum rport_event event);
88static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, 97static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
89 enum rport_event event); 98 enum rport_event event);
99static void bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
100 enum rport_event event);
90static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, 101static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
91 enum rport_event event); 102 enum rport_event event);
92static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, 103static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
@@ -123,6 +134,10 @@ static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
123 enum rport_event event); 134 enum rport_event event);
124static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, 135static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
125 enum rport_event event); 136 enum rport_event event);
137static void bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
138 enum rport_event event);
139static void bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
140 enum rport_event event);
126 141
127static struct bfa_sm_table_s rport_sm_table[] = { 142static struct bfa_sm_table_s rport_sm_table[] = {
128 {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT}, 143 {BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
@@ -130,6 +145,7 @@ static struct bfa_sm_table_s rport_sm_table[] = {
130 {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE}, 145 {BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
131 {BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY}, 146 {BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
132 {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI}, 147 {BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
148 {BFA_SM(bfa_fcs_rport_sm_fc4_fcs_online), BFA_RPORT_ONLINE},
133 {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE}, 149 {BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
134 {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE}, 150 {BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
135 {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY}, 151 {BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
@@ -167,8 +183,8 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
167 break; 183 break;
168 184
169 case RPSM_EVENT_PLOGI_RCVD: 185 case RPSM_EVENT_PLOGI_RCVD:
170 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); 186 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
171 bfa_fcs_rport_send_plogiacc(rport, NULL); 187 bfa_fcs_rport_fcs_online_action(rport);
172 break; 188 break;
173 189
174 case RPSM_EVENT_PLOGI_COMP: 190 case RPSM_EVENT_PLOGI_COMP:
@@ -252,8 +268,8 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
252 268
253 switch (event) { 269 switch (event) {
254 case RPSM_EVENT_FCXP_SENT: 270 case RPSM_EVENT_FCXP_SENT:
255 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 271 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
256 bfa_fcs_rport_hal_online(rport); 272 bfa_fcs_rport_fcs_online_action(rport);
257 break; 273 break;
258 274
259 case RPSM_EVENT_DELETE: 275 case RPSM_EVENT_DELETE:
@@ -348,9 +364,9 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
348 break; 364 break;
349 365
350 case RPSM_EVENT_PLOGI_COMP: 366 case RPSM_EVENT_PLOGI_COMP:
351 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 367 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
352 bfa_timer_stop(&rport->timer); 368 bfa_timer_stop(&rport->timer);
353 bfa_fcs_rport_hal_online(rport); 369 bfa_fcs_rport_fcs_online_action(rport);
354 break; 370 break;
355 371
356 default: 372 default:
@@ -370,9 +386,9 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
370 386
371 switch (event) { 387 switch (event) {
372 case RPSM_EVENT_ACCEPTED: 388 case RPSM_EVENT_ACCEPTED:
373 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 389 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
374 rport->plogi_retries = 0; 390 rport->plogi_retries = 0;
375 bfa_fcs_rport_hal_online(rport); 391 bfa_fcs_rport_fcs_online_action(rport);
376 break; 392 break;
377 393
378 case RPSM_EVENT_LOGO_RCVD: 394 case RPSM_EVENT_LOGO_RCVD:
@@ -397,6 +413,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
397 BFA_FCS_RETRY_TIMEOUT); 413 BFA_FCS_RETRY_TIMEOUT);
398 } else { 414 } else {
399 bfa_stats(rport->port, rport_del_max_plogi_retry); 415 bfa_stats(rport->port, rport_del_max_plogi_retry);
416 rport->old_pid = rport->pid;
400 rport->pid = 0; 417 rport->pid = 0;
401 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 418 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
402 bfa_timer_start(rport->fcs->bfa, &rport->timer, 419 bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -443,13 +460,77 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
443 break; 460 break;
444 461
445 case RPSM_EVENT_PLOGI_COMP: 462 case RPSM_EVENT_PLOGI_COMP:
446 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 463 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
447 bfa_fcxp_discard(rport->fcxp); 464 bfa_fcxp_discard(rport->fcxp);
448 bfa_fcs_rport_hal_online(rport); 465 bfa_fcs_rport_fcs_online_action(rport);
466 break;
467
468 default:
469 bfa_sm_fault(rport->fcs, event);
470 }
471}
472
473/*
474 * PLOGI is complete. Wait for bfa_fcs_itnim to ascertain the SCSI function.
475 */
476static void
477bfa_fcs_rport_sm_fc4_fcs_online(struct bfa_fcs_rport_s *rport,
478 enum rport_event event)
479{
480 bfa_trc(rport->fcs, rport->pwwn);
481 bfa_trc(rport->fcs, rport->pid);
482 bfa_trc(rport->fcs, event);
483
484 switch (event) {
485 case RPSM_EVENT_FC4_FCS_ONLINE:
486 if (rport->scsi_function == BFA_RPORT_INITIATOR) {
487 if (!BFA_FCS_PID_IS_WKA(rport->pid))
488 bfa_fcs_rpf_rport_online(rport);
489 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
490 break;
491 }
492
493 if (!rport->bfa_rport)
494 rport->bfa_rport =
495 bfa_rport_create(rport->fcs->bfa, rport);
496
497 if (rport->bfa_rport) {
498 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
499 bfa_fcs_rport_hal_online(rport);
500 } else {
501 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
502 bfa_fcs_rport_fcs_offline_action(rport);
503 }
504 break;
505
506 case RPSM_EVENT_PLOGI_RCVD:
507 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
508 rport->plogi_pending = BFA_TRUE;
509 bfa_fcs_rport_fcs_offline_action(rport);
510 break;
511
512 case RPSM_EVENT_PLOGI_COMP:
513 case RPSM_EVENT_LOGO_IMP:
514 case RPSM_EVENT_ADDRESS_CHANGE:
515 case RPSM_EVENT_SCN:
516 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
517 bfa_fcs_rport_fcs_offline_action(rport);
518 break;
519
520 case RPSM_EVENT_LOGO_RCVD:
521 case RPSM_EVENT_PRLO_RCVD:
522 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
523 bfa_fcs_rport_fcs_offline_action(rport);
524 break;
525
526 case RPSM_EVENT_DELETE:
527 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
528 bfa_fcs_rport_fcs_offline_action(rport);
449 break; 529 break;
450 530
451 default: 531 default:
452 bfa_sm_fault(rport->fcs, event); 532 bfa_sm_fault(rport->fcs, event);
533 break;
453 } 534 }
454} 535}
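
Taken together, the hunks above defer bfa_rport (and therefore firmware resource) creation until the itnim layer has classified the remote port. A summary of the new online flow, reconstructed from the transitions in this diff:

/*
 * PLOGI accepted/received/completed
 *   -> fc4_fcs_online; itnim is sent BFA_FCS_ITNIM_SM_FCS_ONLINE
 *   -> RPSM_EVENT_FC4_FCS_ONLINE once the SCSI function is known
 *        - pure initiator: go straight to online, no bfa_rport needed
 *        - otherwise: bfa_rport_create() happens here, lazily
 *             - success: hal_online -> HCB_ONLINE -> online
 *             - failure: fc4_logosend for offline cleanup
 */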
455 536
@@ -468,41 +549,34 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
468 switch (event) { 549 switch (event) {
469 case RPSM_EVENT_HCB_ONLINE: 550 case RPSM_EVENT_HCB_ONLINE:
470 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); 551 bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
471 bfa_fcs_rport_online_action(rport); 552 bfa_fcs_rport_hal_online_action(rport);
472 break; 553 break;
473 554
474 case RPSM_EVENT_PRLO_RCVD:
475 case RPSM_EVENT_PLOGI_COMP: 555 case RPSM_EVENT_PLOGI_COMP:
476 break; 556 break;
477 557
558 case RPSM_EVENT_PRLO_RCVD:
478 case RPSM_EVENT_LOGO_RCVD: 559 case RPSM_EVENT_LOGO_RCVD:
479 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); 560 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
480 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 561 bfa_fcs_rport_fcs_offline_action(rport);
481 break; 562 break;
482 563
564 case RPSM_EVENT_SCN:
483 case RPSM_EVENT_LOGO_IMP: 565 case RPSM_EVENT_LOGO_IMP:
484 case RPSM_EVENT_ADDRESS_CHANGE: 566 case RPSM_EVENT_ADDRESS_CHANGE:
485 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 567 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
486 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 568 bfa_fcs_rport_fcs_offline_action(rport);
487 break; 569 break;
488 570
489 case RPSM_EVENT_PLOGI_RCVD: 571 case RPSM_EVENT_PLOGI_RCVD:
490 rport->plogi_pending = BFA_TRUE; 572 rport->plogi_pending = BFA_TRUE;
491 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 573 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
492 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 574 bfa_fcs_rport_fcs_offline_action(rport);
493 break; 575 break;
494 576
495 case RPSM_EVENT_DELETE: 577 case RPSM_EVENT_DELETE:
496 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); 578 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
497 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 579 bfa_fcs_rport_fcs_offline_action(rport);
498 break;
499
500 case RPSM_EVENT_SCN:
501 /*
502 * @todo
503 * Ignore SCN - PLOGI just completed, FC-4 login should detect
504 * device failures.
505 */
506 break; 580 break;
507 581
508 default: 582 default:
@@ -537,18 +611,18 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
537 case RPSM_EVENT_LOGO_IMP: 611 case RPSM_EVENT_LOGO_IMP:
538 case RPSM_EVENT_ADDRESS_CHANGE: 612 case RPSM_EVENT_ADDRESS_CHANGE:
539 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 613 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
540 bfa_fcs_rport_offline_action(rport); 614 bfa_fcs_rport_hal_offline_action(rport);
541 break; 615 break;
542 616
543 case RPSM_EVENT_DELETE: 617 case RPSM_EVENT_DELETE:
544 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); 618 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
545 bfa_fcs_rport_offline_action(rport); 619 bfa_fcs_rport_hal_offline_action(rport);
546 break; 620 break;
547 621
548 case RPSM_EVENT_LOGO_RCVD: 622 case RPSM_EVENT_LOGO_RCVD:
549 case RPSM_EVENT_PRLO_RCVD: 623 case RPSM_EVENT_PRLO_RCVD:
550 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 624 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
551 bfa_fcs_rport_offline_action(rport); 625 bfa_fcs_rport_hal_offline_action(rport);
552 break; 626 break;
553 627
554 case RPSM_EVENT_PLOGI_COMP: 628 case RPSM_EVENT_PLOGI_COMP:
@@ -579,7 +653,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
579 case RPSM_EVENT_DELETE: 653 case RPSM_EVENT_DELETE:
580 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); 654 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
581 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 655 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
582 bfa_fcs_rport_offline_action(rport); 656 bfa_fcs_rport_hal_offline_action(rport);
583 break; 657 break;
584 658
585 case RPSM_EVENT_SCN: 659 case RPSM_EVENT_SCN:
@@ -592,24 +666,16 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
592 case RPSM_EVENT_PRLO_RCVD: 666 case RPSM_EVENT_PRLO_RCVD:
593 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 667 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
594 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 668 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
595 bfa_fcs_rport_offline_action(rport); 669 bfa_fcs_rport_hal_offline_action(rport);
596 break; 670 break;
597 671
598 case RPSM_EVENT_LOGO_IMP: 672 case RPSM_EVENT_LOGO_IMP:
599 rport->pid = 0;
600 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
601 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
602 bfa_timer_start(rport->fcs->bfa, &rport->timer,
603 bfa_fcs_rport_timeout, rport,
604 bfa_fcs_rport_del_timeout);
605 break;
606
607 case RPSM_EVENT_PLOGI_RCVD: 673 case RPSM_EVENT_PLOGI_RCVD:
608 case RPSM_EVENT_ADDRESS_CHANGE: 674 case RPSM_EVENT_ADDRESS_CHANGE:
609 case RPSM_EVENT_PLOGI_COMP: 675 case RPSM_EVENT_PLOGI_COMP:
610 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 676 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
611 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 677 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
612 bfa_fcs_rport_offline_action(rport); 678 bfa_fcs_rport_hal_offline_action(rport);
613 break; 679 break;
614 680
615 default: 681 default:
@@ -642,14 +708,14 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
642 bfa_fcs_rport_send_nsdisc(rport, NULL); 708 bfa_fcs_rport_send_nsdisc(rport, NULL);
643 } else { 709 } else {
644 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 710 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
645 bfa_fcs_rport_offline_action(rport); 711 bfa_fcs_rport_hal_offline_action(rport);
646 } 712 }
647 break; 713 break;
648 714
649 case RPSM_EVENT_DELETE: 715 case RPSM_EVENT_DELETE:
650 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); 716 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
651 bfa_fcxp_discard(rport->fcxp); 717 bfa_fcxp_discard(rport->fcxp);
652 bfa_fcs_rport_offline_action(rport); 718 bfa_fcs_rport_hal_offline_action(rport);
653 break; 719 break;
654 720
655 case RPSM_EVENT_SCN: 721 case RPSM_EVENT_SCN:
@@ -659,7 +725,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
659 case RPSM_EVENT_PRLO_RCVD: 725 case RPSM_EVENT_PRLO_RCVD:
660 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 726 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
661 bfa_fcxp_discard(rport->fcxp); 727 bfa_fcxp_discard(rport->fcxp);
662 bfa_fcs_rport_offline_action(rport); 728 bfa_fcs_rport_hal_offline_action(rport);
663 break; 729 break;
664 730
665 case RPSM_EVENT_PLOGI_COMP: 731 case RPSM_EVENT_PLOGI_COMP:
@@ -668,7 +734,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
668 case RPSM_EVENT_LOGO_IMP: 734 case RPSM_EVENT_LOGO_IMP:
669 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 735 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
670 bfa_fcxp_discard(rport->fcxp); 736 bfa_fcxp_discard(rport->fcxp);
671 bfa_fcs_rport_offline_action(rport); 737 bfa_fcs_rport_hal_offline_action(rport);
672 break; 738 break;
673 739
674 default: 740 default:
@@ -696,21 +762,21 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
696 case RPSM_EVENT_DELETE: 762 case RPSM_EVENT_DELETE:
697 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); 763 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
698 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 764 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
699 bfa_fcs_rport_offline_action(rport); 765 bfa_fcs_rport_hal_offline_action(rport);
700 break; 766 break;
701 767
702 case RPSM_EVENT_LOGO_IMP: 768 case RPSM_EVENT_LOGO_IMP:
703 case RPSM_EVENT_ADDRESS_CHANGE: 769 case RPSM_EVENT_ADDRESS_CHANGE:
704 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 770 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
705 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 771 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
706 bfa_fcs_rport_offline_action(rport); 772 bfa_fcs_rport_hal_offline_action(rport);
707 break; 773 break;
708 774
709 case RPSM_EVENT_LOGO_RCVD: 775 case RPSM_EVENT_LOGO_RCVD:
710 case RPSM_EVENT_PRLO_RCVD: 776 case RPSM_EVENT_PRLO_RCVD:
711 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 777 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
712 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 778 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
713 bfa_fcs_rport_offline_action(rport); 779 bfa_fcs_rport_hal_offline_action(rport);
714 break; 780 break;
715 781
716 case RPSM_EVENT_SCN: 782 case RPSM_EVENT_SCN:
@@ -719,7 +785,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
719 case RPSM_EVENT_PLOGI_RCVD: 785 case RPSM_EVENT_PLOGI_RCVD:
720 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 786 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
721 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 787 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
722 bfa_fcs_rport_offline_action(rport); 788 bfa_fcs_rport_hal_offline_action(rport);
723 break; 789 break;
724 790
725 default: 791 default:
@@ -756,13 +822,13 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
756 case RPSM_EVENT_FAILED: 822 case RPSM_EVENT_FAILED:
757 case RPSM_EVENT_ADDRESS_CHANGE: 823 case RPSM_EVENT_ADDRESS_CHANGE:
758 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 824 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
759 bfa_fcs_rport_offline_action(rport); 825 bfa_fcs_rport_hal_offline_action(rport);
760 break; 826 break;
761 827
762 case RPSM_EVENT_DELETE: 828 case RPSM_EVENT_DELETE:
763 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); 829 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
764 bfa_fcxp_discard(rport->fcxp); 830 bfa_fcxp_discard(rport->fcxp);
765 bfa_fcs_rport_offline_action(rport); 831 bfa_fcs_rport_hal_offline_action(rport);
766 break; 832 break;
767 833
768 case RPSM_EVENT_SCN: 834 case RPSM_EVENT_SCN:
@@ -774,14 +840,14 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
774 case RPSM_EVENT_LOGO_IMP: 840 case RPSM_EVENT_LOGO_IMP:
775 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); 841 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
776 bfa_fcxp_discard(rport->fcxp); 842 bfa_fcxp_discard(rport->fcxp);
777 bfa_fcs_rport_offline_action(rport); 843 bfa_fcs_rport_hal_offline_action(rport);
778 break; 844 break;
779 845
780 case RPSM_EVENT_LOGO_RCVD: 846 case RPSM_EVENT_LOGO_RCVD:
781 case RPSM_EVENT_PRLO_RCVD: 847 case RPSM_EVENT_PRLO_RCVD:
782 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); 848 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
783 bfa_fcxp_discard(rport->fcxp); 849 bfa_fcxp_discard(rport->fcxp);
784 bfa_fcs_rport_offline_action(rport); 850 bfa_fcs_rport_hal_offline_action(rport);
785 break; 851 break;
786 852
787 default: 853 default:
@@ -803,13 +869,19 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
803 switch (event) { 869 switch (event) {
804 case RPSM_EVENT_FC4_OFFLINE: 870 case RPSM_EVENT_FC4_OFFLINE:
805 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); 871 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
806 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 872 bfa_fcs_rport_hal_offline(rport);
807 break; 873 break;
808 874
809 case RPSM_EVENT_DELETE: 875 case RPSM_EVENT_DELETE:
810 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); 876 if (rport->pid && (rport->prlo == BFA_TRUE))
877 bfa_fcs_rport_send_prlo_acc(rport);
878 if (rport->pid && (rport->prlo == BFA_FALSE))
879 bfa_fcs_rport_send_logo_acc(rport);
880
881 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
811 break; 882 break;
812 883
884 case RPSM_EVENT_HCB_ONLINE:
813 case RPSM_EVENT_LOGO_RCVD: 885 case RPSM_EVENT_LOGO_RCVD:
814 case RPSM_EVENT_PRLO_RCVD: 886 case RPSM_EVENT_PRLO_RCVD:
815 case RPSM_EVENT_ADDRESS_CHANGE: 887 case RPSM_EVENT_ADDRESS_CHANGE:
@@ -835,7 +907,20 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
835 switch (event) { 907 switch (event) {
836 case RPSM_EVENT_FC4_OFFLINE: 908 case RPSM_EVENT_FC4_OFFLINE:
837 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); 909 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
838 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 910 bfa_fcs_rport_hal_offline(rport);
911 break;
912
913 case RPSM_EVENT_LOGO_RCVD:
914 bfa_fcs_rport_send_logo_acc(rport);
915 case RPSM_EVENT_PRLO_RCVD:
916 if (rport->prlo == BFA_TRUE)
917 bfa_fcs_rport_send_prlo_acc(rport);
918 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
919 break;
920
921 case RPSM_EVENT_HCB_ONLINE:
922 case RPSM_EVENT_DELETE:
923 /* Rport is being deleted */
839 break; 924 break;
840 925
841 default: 926 default:
@@ -857,13 +942,23 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
857 switch (event) { 942 switch (event) {
858 case RPSM_EVENT_FC4_OFFLINE: 943 case RPSM_EVENT_FC4_OFFLINE:
859 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); 944 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
860 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); 945 bfa_fcs_rport_hal_offline(rport);
861 break; 946 break;
862 947
863 case RPSM_EVENT_SCN:
864 case RPSM_EVENT_LOGO_IMP:
865 case RPSM_EVENT_LOGO_RCVD: 948 case RPSM_EVENT_LOGO_RCVD:
949 /*
950 * Rport is going offline. Just ack the logo
951 */
952 bfa_fcs_rport_send_logo_acc(rport);
953 break;
954
866 case RPSM_EVENT_PRLO_RCVD: 955 case RPSM_EVENT_PRLO_RCVD:
956 bfa_fcs_rport_send_prlo_acc(rport);
957 break;
958
959 case RPSM_EVENT_HCB_ONLINE:
960 case RPSM_EVENT_SCN:
961 case RPSM_EVENT_LOGO_IMP:
867 case RPSM_EVENT_ADDRESS_CHANGE: 962 case RPSM_EVENT_ADDRESS_CHANGE:
868 /* 963 /*
869 * rport is already going offline. 964 * rport is already going offline.
@@ -907,24 +1002,23 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
907 */ 1002 */
908 1003
909 case RPSM_EVENT_ADDRESS_CHANGE: 1004 case RPSM_EVENT_ADDRESS_CHANGE:
910 if (bfa_fcs_lport_is_online(rport->port)) { 1005 if (!bfa_fcs_lport_is_online(rport->port)) {
911 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
912 bfa_sm_set_state(rport,
913 bfa_fcs_rport_sm_nsdisc_sending);
914 rport->ns_retries = 0;
915 bfa_fcs_rport_send_nsdisc(rport, NULL);
916 } else {
917 bfa_sm_set_state(rport,
918 bfa_fcs_rport_sm_plogi_sending);
919 rport->plogi_retries = 0;
920 bfa_fcs_rport_send_plogi(rport, NULL);
921 }
922 } else {
923 rport->pid = 0; 1006 rport->pid = 0;
924 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 1007 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
925 bfa_timer_start(rport->fcs->bfa, &rport->timer, 1008 bfa_timer_start(rport->fcs->bfa, &rport->timer,
926 bfa_fcs_rport_timeout, rport, 1009 bfa_fcs_rport_timeout, rport,
927 bfa_fcs_rport_del_timeout); 1010 bfa_fcs_rport_del_timeout);
1011 break;
1012 }
1013 if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
1014 bfa_sm_set_state(rport,
1015 bfa_fcs_rport_sm_nsdisc_sending);
1016 rport->ns_retries = 0;
1017 bfa_fcs_rport_send_nsdisc(rport, NULL);
1018 } else {
1019 bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
1020 rport->plogi_retries = 0;
1021 bfa_fcs_rport_send_plogi(rport, NULL);
928 } 1022 }
929 break; 1023 break;
930 1024
@@ -1001,7 +1095,11 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
1001 break; 1095 break;
1002 1096
1003 case RPSM_EVENT_DELETE: 1097 case RPSM_EVENT_DELETE:
1004 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); 1098 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
1099 if (rport->pid && (rport->prlo == BFA_TRUE))
1100 bfa_fcs_rport_send_prlo_acc(rport);
1101 if (rport->pid && (rport->prlo == BFA_FALSE))
1102 bfa_fcs_rport_send_logo_acc(rport);
1005 break; 1103 break;
1006 1104
1007 case RPSM_EVENT_LOGO_IMP: 1105 case RPSM_EVENT_LOGO_IMP:
@@ -1040,7 +1138,14 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
1040 break; 1138 break;
1041 1139
1042 case RPSM_EVENT_LOGO_RCVD: 1140 case RPSM_EVENT_LOGO_RCVD:
1141 bfa_fcs_rport_send_logo_acc(rport);
1043 case RPSM_EVENT_PRLO_RCVD: 1142 case RPSM_EVENT_PRLO_RCVD:
1143 if (rport->prlo == BFA_TRUE)
1144 bfa_fcs_rport_send_prlo_acc(rport);
1145
1146 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
1147 break;
1148
1044 case RPSM_EVENT_ADDRESS_CHANGE: 1149 case RPSM_EVENT_ADDRESS_CHANGE:
1045 break; 1150 break;
1046 1151
@@ -1072,7 +1177,11 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
1072 break; 1177 break;
1073 1178
1074 case RPSM_EVENT_LOGO_RCVD: 1179 case RPSM_EVENT_LOGO_RCVD:
1180 bfa_fcs_rport_send_logo_acc(rport);
1075 case RPSM_EVENT_PRLO_RCVD: 1181 case RPSM_EVENT_PRLO_RCVD:
1182 if (rport->prlo == BFA_TRUE)
1183 bfa_fcs_rport_send_prlo_acc(rport);
1184
1076 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 1185 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1077 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 1186 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1078 bfa_fcs_rport_free(rport); 1187 bfa_fcs_rport_free(rport);
@@ -1126,9 +1235,9 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
1126 break; 1235 break;
1127 1236
1128 case RPSM_EVENT_PLOGI_COMP: 1237 case RPSM_EVENT_PLOGI_COMP:
1129 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 1238 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
1130 bfa_timer_stop(&rport->timer); 1239 bfa_timer_stop(&rport->timer);
1131 bfa_fcs_rport_hal_online(rport); 1240 bfa_fcs_rport_fcs_online_action(rport);
1132 break; 1241 break;
1133 1242
1134 case RPSM_EVENT_PLOGI_SEND: 1243 case RPSM_EVENT_PLOGI_SEND:
@@ -1190,9 +1299,9 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
1190 break; 1299 break;
1191 1300
1192 case RPSM_EVENT_PLOGI_COMP: 1301 case RPSM_EVENT_PLOGI_COMP:
1193 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 1302 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
1194 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); 1303 bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
1195 bfa_fcs_rport_hal_online(rport); 1304 bfa_fcs_rport_fcs_online_action(rport);
1196 break; 1305 break;
1197 1306
1198 default: 1307 default:
@@ -1254,9 +1363,9 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
1254 break; 1363 break;
1255 1364
1256 case RPSM_EVENT_PLOGI_COMP: 1365 case RPSM_EVENT_PLOGI_COMP:
1257 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 1366 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
1258 bfa_timer_stop(&rport->timer); 1367 bfa_timer_stop(&rport->timer);
1259 bfa_fcs_rport_hal_online(rport); 1368 bfa_fcs_rport_fcs_online_action(rport);
1260 break; 1369 break;
1261 1370
1262 default: 1371 default:
@@ -1296,6 +1405,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1296 bfa_fcs_rport_sm_nsdisc_sending); 1405 bfa_fcs_rport_sm_nsdisc_sending);
1297 bfa_fcs_rport_send_nsdisc(rport, NULL); 1406 bfa_fcs_rport_send_nsdisc(rport, NULL);
1298 } else { 1407 } else {
1408 rport->old_pid = rport->pid;
1299 rport->pid = 0; 1409 rport->pid = 0;
1300 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); 1410 bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
1301 bfa_timer_start(rport->fcs->bfa, &rport->timer, 1411 bfa_timer_start(rport->fcs->bfa, &rport->timer,
@@ -1343,9 +1453,9 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1343 break; 1453 break;
1344 1454
1345 case RPSM_EVENT_PLOGI_COMP: 1455 case RPSM_EVENT_PLOGI_COMP:
1346 bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); 1456 bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online);
1347 bfa_fcxp_discard(rport->fcxp); 1457 bfa_fcxp_discard(rport->fcxp);
1348 bfa_fcs_rport_hal_online(rport); 1458 bfa_fcs_rport_fcs_online_action(rport);
1349 break; 1459 break;
1350 1460
1351 default: 1461 default:
@@ -1353,7 +1463,63 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
1353 } 1463 }
1354} 1464}
1355 1465
1466/*
1467 * Rport needs to be deleted;
1468 * waiting for ITNIM cleanup to finish
1469 */
1470static void
1471bfa_fcs_rport_sm_fc4_off_delete(struct bfa_fcs_rport_s *rport,
1472 enum rport_event event)
1473{
1474 bfa_trc(rport->fcs, rport->pwwn);
1475 bfa_trc(rport->fcs, rport->pid);
1476 bfa_trc(rport->fcs, event);
1356 1477
1478 switch (event) {
1479 case RPSM_EVENT_FC4_OFFLINE:
1480 bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
1481 bfa_fcs_rport_hal_offline(rport);
1482 break;
1483
1484 case RPSM_EVENT_DELETE:
1485 case RPSM_EVENT_PLOGI_RCVD:
1486 /* Ignore these events */
1487 break;
1488
1489 default:
1490 bfa_sm_fault(rport->fcs, event);
1491 break;
1492 }
1493}
1494
1495/*
1496 * Rport needs to be deleted;
1497 * waiting for BFA/FW to finish current processing
1498 */
1499static void
1500bfa_fcs_rport_sm_delete_pending(struct bfa_fcs_rport_s *rport,
1501 enum rport_event event)
1502{
1503 bfa_trc(rport->fcs, rport->pwwn);
1504 bfa_trc(rport->fcs, rport->pid);
1505 bfa_trc(rport->fcs, event);
1506
1507 switch (event) {
1508 case RPSM_EVENT_HCB_OFFLINE:
1509 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
1510 bfa_fcs_rport_free(rport);
1511 break;
1512
1513 case RPSM_EVENT_DELETE:
1514 case RPSM_EVENT_LOGO_IMP:
1515 case RPSM_EVENT_PLOGI_RCVD:
1516 /* Ignore these events */
1517 break;
1518
1519 default:
1520 bfa_sm_fault(rport->fcs, event);
1521 }
1522}
1357 1523
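
The two states above complete the reworked delete path, which now tears down the FC-4 layer before the HAL rport. Summarized from the transitions in this diff:

/*
 * RPSM_EVENT_DELETE while logged in
 *   -> any pending LOGO/PRLO is acked first
 *   -> fc4_off_delete: wait for RPSM_EVENT_FC4_OFFLINE
 *   -> bfa_fcs_rport_hal_offline(): HAL offline, or a direct
 *      bfa_cb_rport_offline() when no bfa_rport was ever created
 *   -> delete_pending: wait for RPSM_EVENT_HCB_OFFLINE
 *   -> uninit, then bfa_fcs_rport_free()
 */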
1358/* 1524/*
1359 * fcs_rport_private FCS RPORT private functions 1525 * fcs_rport_private FCS RPORT private functions
@@ -1370,10 +1536,11 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1370 1536
1371 bfa_trc(rport->fcs, rport->pwwn); 1537 bfa_trc(rport->fcs, rport->pwwn);
1372 1538
1373 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1539 fcxp = fcxp_alloced ? fcxp_alloced :
1540 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
1374 if (!fcxp) { 1541 if (!fcxp) {
1375 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1542 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1376 bfa_fcs_rport_send_plogi, rport); 1543 bfa_fcs_rport_send_plogi, rport, BFA_TRUE);
1377 return; 1544 return;
1378 } 1545 }
1379 rport->fcxp = fcxp; 1546 rport->fcxp = fcxp;
@@ -1490,10 +1657,11 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1490 bfa_trc(rport->fcs, rport->pwwn); 1657 bfa_trc(rport->fcs, rport->pwwn);
1491 bfa_trc(rport->fcs, rport->reply_oxid); 1658 bfa_trc(rport->fcs, rport->reply_oxid);
1492 1659
1493 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1660 fcxp = fcxp_alloced ? fcxp_alloced :
1661 bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
1494 if (!fcxp) { 1662 if (!fcxp) {
1495 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1663 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1496 bfa_fcs_rport_send_plogiacc, rport); 1664 bfa_fcs_rport_send_plogiacc, rport, BFA_FALSE);
1497 return; 1665 return;
1498 } 1666 }
1499 rport->fcxp = fcxp; 1667 rport->fcxp = fcxp;
@@ -1522,10 +1690,11 @@ bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1522 1690
1523 bfa_trc(rport->fcs, rport->pwwn); 1691 bfa_trc(rport->fcs, rport->pwwn);
1524 1692
1525 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1693 fcxp = fcxp_alloced ? fcxp_alloced :
1694 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
1526 if (!fcxp) { 1695 if (!fcxp) {
1527 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1696 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1528 bfa_fcs_rport_send_adisc, rport); 1697 bfa_fcs_rport_send_adisc, rport, BFA_TRUE);
1529 return; 1698 return;
1530 } 1699 }
1531 rport->fcxp = fcxp; 1700 rport->fcxp = fcxp;
@@ -1585,10 +1754,11 @@ bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1585 1754
1586 bfa_trc(rport->fcs, rport->pid); 1755 bfa_trc(rport->fcs, rport->pid);
1587 1756
1588 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1757 fcxp = fcxp_alloced ? fcxp_alloced :
1758 bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
1589 if (!fcxp) { 1759 if (!fcxp) {
1590 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1760 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1591 bfa_fcs_rport_send_nsdisc, rport); 1761 bfa_fcs_rport_send_nsdisc, rport, BFA_TRUE);
1592 return; 1762 return;
1593 } 1763 }
1594 rport->fcxp = fcxp; 1764 rport->fcxp = fcxp;
@@ -1741,10 +1911,11 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
1741 1911
1742 port = rport->port; 1912 port = rport->port;
1743 1913
1744 fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); 1914 fcxp = fcxp_alloced ? fcxp_alloced :
1915 bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
1745 if (!fcxp) { 1916 if (!fcxp) {
1746 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe, 1917 bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
1747 bfa_fcs_rport_send_logo, rport); 1918 bfa_fcs_rport_send_logo, rport, BFA_FALSE);
1748 return; 1919 return;
1749 } 1920 }
1750 rport->fcxp = fcxp; 1921 rport->fcxp = fcxp;
@@ -1778,7 +1949,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
1778 1949
1779 port = rport->port; 1950 port = rport->port;
1780 1951
1781 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 1952 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
1782 if (!fcxp) 1953 if (!fcxp)
1783 return; 1954 return;
1784 1955
@@ -1849,7 +2020,7 @@ bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
1849 bfa_fcs_itnim_is_initiator(rport->itnim); 2020 bfa_fcs_itnim_is_initiator(rport->itnim);
1850 } 2021 }
1851 2022
1852 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 2023 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
1853 if (!fcxp) 2024 if (!fcxp)
1854 return; 2025 return;
1855 2026
@@ -1886,7 +2057,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
1886 2057
1887 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); 2058 speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
1888 2059
1889 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 2060 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
1890 if (!fcxp) 2061 if (!fcxp)
1891 return; 2062 return;
1892 2063
@@ -1920,7 +2091,7 @@ bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
1920 */ 2091 */
1921 if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) { 2092 if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {
1922 2093
1923 fcxp = bfa_fcs_fcxp_alloc(port->fcs); 2094 fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
1924 if (!fcxp) 2095 if (!fcxp)
1925 return; 2096 return;
1926 2097
@@ -1957,6 +2128,15 @@ bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
1957 bfa_rport_online(rport->bfa_rport, &rport_info); 2128 bfa_rport_online(rport->bfa_rport, &rport_info);
1958} 2129}
1959 2130
2131static void
2132bfa_fcs_rport_hal_offline(struct bfa_fcs_rport_s *rport)
2133{
2134 if (rport->bfa_rport)
2135 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
2136 else
2137 bfa_cb_rport_offline(rport);
2138}
2139
1960static struct bfa_fcs_rport_s * 2140static struct bfa_fcs_rport_s *
1961bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) 2141bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1962{ 2142{
@@ -1967,6 +2147,11 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1967 /* 2147 /*
1968 * allocate rport 2148 * allocate rport
1969 */ 2149 */
2150 if (fcs->num_rport_logins >= bfa_fcs_rport_max_logins) {
2151 bfa_trc(fcs, rpid);
2152 return NULL;
2153 }
2154
1970 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) 2155 if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
1971 != BFA_STATUS_OK) { 2156 != BFA_STATUS_OK) {
1972 bfa_trc(fcs, rpid); 2157 bfa_trc(fcs, rpid);
@@ -1981,16 +2166,9 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
1981 rport->rp_drv = rport_drv; 2166 rport->rp_drv = rport_drv;
1982 rport->pid = rpid; 2167 rport->pid = rpid;
1983 rport->pwwn = pwwn; 2168 rport->pwwn = pwwn;
2169 rport->old_pid = 0;
1984 2170
1985 /* 2171 rport->bfa_rport = NULL;
1986 * allocate BFA rport
1987 */
1988 rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
1989 if (!rport->bfa_rport) {
1990 bfa_trc(fcs, rpid);
1991 kfree(rport_drv);
1992 return NULL;
1993 }
1994 2172
1995 /* 2173 /*
1996 * allocate FC-4s 2174 * allocate FC-4s
@@ -2001,14 +2179,13 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
2001 rport->itnim = bfa_fcs_itnim_create(rport); 2179 rport->itnim = bfa_fcs_itnim_create(rport);
2002 if (!rport->itnim) { 2180 if (!rport->itnim) {
2003 bfa_trc(fcs, rpid); 2181 bfa_trc(fcs, rpid);
2004 bfa_sm_send_event(rport->bfa_rport,
2005 BFA_RPORT_SM_DELETE);
2006 kfree(rport_drv); 2182 kfree(rport_drv);
2007 return NULL; 2183 return NULL;
2008 } 2184 }
2009 } 2185 }
2010 2186
2011 bfa_fcs_lport_add_rport(port, rport); 2187 bfa_fcs_lport_add_rport(port, rport);
2188 fcs->num_rport_logins++;
2012 2189
2013 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); 2190 bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
2014 2191
@@ -2024,20 +2201,28 @@ static void
2024bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) 2201bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
2025{ 2202{
2026 struct bfa_fcs_lport_s *port = rport->port; 2203 struct bfa_fcs_lport_s *port = rport->port;
2204 struct bfa_fcs_s *fcs = port->fcs;
2027 2205
2028 /* 2206 /*
2029 * - delete FC-4s 2207 * - delete FC-4s
2030 * - delete BFA rport 2208 * - delete BFA rport
2031 * - remove from queue of rports 2209 * - remove from queue of rports
2032 */ 2210 */
2211 rport->plogi_pending = BFA_FALSE;
2212
2033 if (bfa_fcs_lport_is_initiator(port)) { 2213 if (bfa_fcs_lport_is_initiator(port)) {
2034 bfa_fcs_itnim_delete(rport->itnim); 2214 bfa_fcs_itnim_delete(rport->itnim);
2035 if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid)) 2215 if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
2036 bfa_fcs_rpf_rport_offline(rport); 2216 bfa_fcs_rpf_rport_offline(rport);
2037 } 2217 }
2038 2218
2039 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE); 2219 if (rport->bfa_rport) {
2220 bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
2221 rport->bfa_rport = NULL;
2222 }
2223
2040 bfa_fcs_lport_del_rport(port, rport); 2224 bfa_fcs_lport_del_rport(port, rport);
2225 fcs->num_rport_logins--;
2041 kfree(rport->rp_drv); 2226 kfree(rport->rp_drv);
2042} 2227}
2043 2228
@@ -2071,7 +2256,18 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
2071} 2256}
2072 2257
2073static void 2258static void
2074bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport) 2259bfa_fcs_rport_fcs_online_action(struct bfa_fcs_rport_s *rport)
2260{
2261 if ((!rport->pid) || (!rport->pwwn)) {
2262 bfa_trc(rport->fcs, rport->pid);
2263 bfa_sm_fault(rport->fcs, rport->pid);
2264 }
2265
2266 bfa_sm_send_event(rport->itnim, BFA_FCS_ITNIM_SM_FCS_ONLINE);
2267}
2268
2269static void
2270bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
2075{ 2271{
2076 struct bfa_fcs_lport_s *port = rport->port; 2272 struct bfa_fcs_lport_s *port = rport->port;
2077 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad; 2273 struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
@@ -2086,7 +2282,7 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
2086 } 2282 }
2087 2283
2088 if (bfa_fcs_lport_is_initiator(port)) { 2284 if (bfa_fcs_lport_is_initiator(port)) {
2089 bfa_fcs_itnim_rport_online(rport->itnim); 2285 bfa_fcs_itnim_brp_online(rport->itnim);
2090 if (!BFA_FCS_PID_IS_WKA(rport->pid)) 2286 if (!BFA_FCS_PID_IS_WKA(rport->pid))
2091 bfa_fcs_rpf_rport_online(rport); 2287 bfa_fcs_rpf_rport_online(rport);
2092 }; 2288 };
@@ -2102,15 +2298,28 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
 }
 
 static void
-bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
+bfa_fcs_rport_fcs_offline_action(struct bfa_fcs_rport_s *rport)
+{
+	if (!BFA_FCS_PID_IS_WKA(rport->pid))
+		bfa_fcs_rpf_rport_offline(rport);
+
+	bfa_fcs_itnim_rport_offline(rport->itnim);
+}
+
+static void
+bfa_fcs_rport_hal_offline_action(struct bfa_fcs_rport_s *rport)
 {
 	struct bfa_fcs_lport_s *port = rport->port;
 	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
 	char lpwwn_buf[BFA_STRING_32];
 	char rpwwn_buf[BFA_STRING_32];
 
+	if (!rport->bfa_rport) {
+		bfa_fcs_rport_fcs_offline_action(rport);
+		return;
+	}
+
 	rport->stats.offlines++;
-	rport->plogi_pending = BFA_FALSE;
 
 	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 	wwn2str(rpwwn_buf, rport->pwwn);
@@ -2340,7 +2549,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
 	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-
 /*
  * brief
  * This routine BFA callback for bfa_rport_online() call.
@@ -2508,7 +2716,7 @@ bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
 
 	bfa_trc(rport->fcs, rport->pid);
 
-	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+	fcxp = bfa_fcs_fcxp_alloc(port->fcs, BFA_FALSE);
 	if (!fcxp)
 		return;
 	len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
@@ -2534,7 +2742,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
 
 	bfa_trc(rport->fcs, rx_fchs->s_id);
 
-	fcxp = bfa_fcs_fcxp_alloc(rport->fcs);
+	fcxp = bfa_fcs_fcxp_alloc(rport->fcs, BFA_FALSE);
 	if (!fcxp)
 		return;
 
@@ -2582,6 +2790,17 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
 	bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
 }
 
+/*
+ * Called by BFAD to set the max limit on number of bfa_fcs_rport allocation
+ * which limits number of concurrent logins to remote ports
+ */
+void
+bfa_fcs_rport_set_max_logins(u32 max_logins)
+{
+	if (max_logins > 0)
+		bfa_fcs_rport_max_logins = max_logins;
+}
+
 void
 bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
 		struct bfa_rport_attr_s *rport_attr)
@@ -2605,9 +2824,11 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
 	rport_attr->curr_speed = rport->rpf.rpsc_speed;
 	rport_attr->assigned_speed = rport->rpf.assigned_speed;
 
-	qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
-	qos_attr.qos_flow_id =
-		cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+	if (rport->bfa_rport) {
+		qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+		qos_attr.qos_flow_id =
+			cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+	}
 	rport_attr->qos_attr = qos_attr;
 
 	rport_attr->trl_enforced = BFA_FALSE;
@@ -2940,10 +3161,11 @@ bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 
 	bfa_trc(rport->fcs, rport->pwwn);
 
-	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+	fcxp = fcxp_alloced ? fcxp_alloced :
+	       bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE);
 	if (!fcxp) {
 		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
-				bfa_fcs_rpf_send_rpsc2, rpf);
+				bfa_fcs_rpf_send_rpsc2, rpf, BFA_TRUE);
 		return;
 	}
 	rpf->fcxp = fcxp;
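Editor's note: the bfa_fcs_rport.c hunks above do two related things: they add a module-wide cap on concurrent remote-port logins (a num_rport_logins counter checked against bfa_fcs_rport_max_logins, which bfa_fcs_rport_set_max_logins() adjusts), and they guard every use of rport->bfa_rport with a NULL check for rports that have no HAL rport attached. Below is a minimal userspace sketch of the counting pattern only; the names and the cap value are illustrative, not the driver's.

/* Sketch of the login-throttle pattern: a settable cap checked at
 * allocation time, a counter bumped on alloc and dropped on free. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int max_logins = 1024;	/* settable, like max_rport_logins */
static unsigned int num_logins;		/* like fcs->num_rport_logins */

struct rport {
	int id;
};

static struct rport *rport_alloc(int id)
{
	struct rport *rp;

	if (num_logins >= max_logins)
		return NULL;		/* refuse further remote-port logins */
	rp = calloc(1, sizeof(*rp));
	if (!rp)
		return NULL;
	rp->id = id;
	num_logins++;
	return rp;
}

static void rport_free(struct rport *rp)
{
	if (!rp)
		return;
	num_logins--;			/* mirrors the free path above */
	free(rp);
}

int main(void)
{
	struct rport *a, *b, *c;

	max_logins = 2;
	a = rport_alloc(1);
	b = rport_alloc(2);
	c = rport_alloc(3);
	printf("a=%p b=%p c=%p (c should be NULL)\n",
	       (void *)a, (void *)b, (void *)c);
	rport_free(a);
	rport_free(b);
	return 0;
}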
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 21ad2902e5c..75ca8752b9f 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -92,7 +92,6 @@ static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
 			enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
@@ -599,8 +598,9 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 		break;
 
 	case IOC_E_HWERROR:
+	case IOC_E_HWFAILED:
 		/*
-		 * HB failure notification, ignore.
+		 * HB failure / HW error notification, ignore.
 		 */
 		break;
 	default:
@@ -632,6 +632,10 @@ bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 		break;
 
+	case IOC_E_HWERROR:
+		/* Ignore - already in hwfail state */
+		break;
+
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -1455,7 +1459,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
-		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
+		if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
 			bfa_trc(ioc, i);
 			bfa_trc(ioc, fwhdr->md5sum[i]);
 			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
@@ -1480,7 +1484,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
-	if (fwhdr.signature != drv_fwhdr->signature) {
+	if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
 		bfa_trc(ioc, fwhdr.signature);
 		bfa_trc(ioc, drv_fwhdr->signature);
 		return BFA_FALSE;
@@ -1704,7 +1708,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 	 * write smem
 	 */
 	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
-		      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
+		      cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
 
 	loff += sizeof(u32);
 
@@ -2260,6 +2264,12 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
+void
+bfa_ioc_suspend(struct bfa_ioc_s *ioc)
+{
+	ioc->dbg_fwsave_once = BFA_TRUE;
+	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
 
 /*
  * Initialize memory for saving firmware trace. Driver must initialize
@@ -2269,7 +2279,7 @@ void
 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
 {
 	ioc->dbg_fwsave = dbg_fwsave;
-	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
 }
 
 /*
@@ -2856,7 +2866,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
 /*
  * Save firmware trace if configured.
  */
-static void
+void
 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
 {
 	int tlen;
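Editor's note: the cpu_to_le32() changes in bfa_ioc.c above make the firmware-header comparison and the SMEM download byte-order safe: the image read back from adapter flash is little-endian, so the driver-side words must be converted before comparing (or writing) on a big-endian host. A small userspace sketch of the same idea, substituting glibc's <endian.h> helpers for the kernel's cpu_to_le32():

/* Compare a little-endian on-flash header against a host-order header
 * by converting one side to the other's byte order first. */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>	/* htole32(), glibc */

static int fwhdr_matches(const uint32_t *flash_le, const uint32_t *host,
			 int nwords)
{
	for (int i = 0; i < nwords; i++)
		if (flash_le[i] != htole32(host[i]))
			return 0;	/* mismatch, e.g. stale flash image */
	return 1;
}

int main(void)
{
	uint32_t host[2] = { 0x11223344, 0xdeadbeef };
	uint32_t flash[2] = { htole32(0x11223344), htole32(0xdeadbeef) };

	printf("match=%d\n", fwhdr_matches(flash, host, 2));
	return 0;
}

On a little-endian machine htole32() is a no-op and the unconverted compare happens to work, which is exactly why bugs like the one fixed above survive until the driver runs on big-endian hardware.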
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 7b916e04ca5..b2856f96567 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -820,6 +820,7 @@ void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
 		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
 void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
+void bfa_ioc_suspend(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		enum bfi_pcifn_class clscode);
 void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
@@ -866,6 +867,7 @@ bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
 void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
 bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
 bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
+void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 
 /*
  * asic block configuration related APIs
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 2d36e482383..189fff71e3c 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -121,6 +121,7 @@ struct bfa_s {
 	bfa_boolean_t		fcs;		/* FCS is attached to BFA */
 	struct bfa_msix_s	msix;
 	int			bfa_aen_seq;
+	bfa_boolean_t		intr_enabled;	/* Status of interrupts */
 };
 
 extern bfa_boolean_t bfa_auto_recover;
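Editor's note: the new intr_enabled flag in struct bfa_s presumably lets teardown and error-recovery paths know whether interrupt vectors are currently set up, so a recovery sequence that already removed them does not try to free them again. A sketch of that guard pattern, with hypothetical names:

/* Remember whether interrupts are set up so teardown is idempotent. */
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool intr_enabled;
};

static void setup_intr(struct dev_state *d)
{
	/* ... request vectors here ... */
	d->intr_enabled = true;
}

static void remove_intr(struct dev_state *d)
{
	if (!d->intr_enabled)
		return;			/* nothing to free; avoids double free */
	/* ... free vectors here ... */
	d->intr_enabled = false;
}

int main(void)
{
	struct dev_state d = { 0 };

	remove_intr(&d);	/* safe before setup */
	setup_intr(&d);
	remove_intr(&d);
	remove_intr(&d);	/* safe to repeat, e.g. from recovery paths */
	printf("ok\n");
	return 0;
}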
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 2e856e6710f..b2538d60db3 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -440,9 +440,11 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
 	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 
-	INIT_LIST_HEAD(&mod->fcxp_free_q);
+	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
+	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
 	INIT_LIST_HEAD(&mod->fcxp_active_q);
-	INIT_LIST_HEAD(&mod->fcxp_unused_q);
+	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
+	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
 
 	mod->fcxp_list = fcxp;
 
@@ -450,7 +452,14 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
 		fcxp->fcxp_mod = mod;
 		fcxp->fcxp_tag = i;
 
-		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+		if (i < (mod->num_fcxps / 2)) {
+			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+			fcxp->req_rsp = BFA_TRUE;
+		} else {
+			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
+			fcxp->req_rsp = BFA_FALSE;
+		}
+
 		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
 		fcxp->reqq_waiting = BFA_FALSE;
 
@@ -514,7 +523,8 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	if (!cfg->drvcfg.min_cfg)
 		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
 
-	INIT_LIST_HEAD(&mod->wait_q);
+	INIT_LIST_HEAD(&mod->req_wait_q);
+	INIT_LIST_HEAD(&mod->rsp_wait_q);
 
 	claim_fcxps_mem(mod);
 }
@@ -542,7 +552,8 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
 	struct list_head *qe, *qen;
 
 	/* Enqueue unused fcxp resources to free_q */
-	list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
+	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
 
 	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
 		fcxp = (struct bfa_fcxp_s *) qe;
@@ -559,11 +570,14 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
 }
 
 static struct bfa_fcxp_s *
-bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
 {
 	struct bfa_fcxp_s *fcxp;
 
-	bfa_q_deq(&fm->fcxp_free_q, &fcxp);
+	if (req)
+		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
+	else
+		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
 
 	if (fcxp)
 		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
@@ -642,7 +656,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
 	struct bfa_fcxp_wqe_s *wqe;
 
-	bfa_q_deq(&mod->wait_q, &wqe);
+	if (fcxp->req_rsp)
+		bfa_q_deq(&mod->req_wait_q, &wqe);
+	else
+		bfa_q_deq(&mod->rsp_wait_q, &wqe);
+
 	if (wqe) {
 		bfa_trc(mod->bfa, fcxp->fcxp_tag);
 
@@ -657,7 +675,11 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
 
 	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
 	list_del(&fcxp->qe);
-	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+
+	if (fcxp->req_rsp)
+		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
+	else
+		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
 }
 
 static void
@@ -900,21 +922,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
  *			Address (given the sge index).
  * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
  *			len (given the sge index).
+ * @param[in]	req	Allocated FCXP is used to send req or rsp?
+ *			request - BFA_TRUE, response - BFA_FALSE
  *
  * @return FCXP instance. NULL on failure.
  */
 struct bfa_fcxp_s *
-bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
-	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 {
 	struct bfa_fcxp_s *fcxp = NULL;
 
 	WARN_ON(bfa == NULL);
 
-	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
+	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
 	if (fcxp == NULL)
 		return NULL;
 
@@ -1071,17 +1095,20 @@ bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
 }
 
 void
-bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
 		    bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
 		    void *caller, int nreq_sgles,
 		    int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
 		    bfa_fcxp_get_sglen_t req_sglen_cbfn,
 		    bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
-		    bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+		    bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
 {
 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-	WARN_ON(!list_empty(&mod->fcxp_free_q));
+	if (req)
+		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
+	else
+		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
 
 	wqe->alloc_cbfn = alloc_cbfn;
 	wqe->alloc_cbarg = alloc_cbarg;
@@ -1094,7 +1121,10 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
 	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
 	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
 
-	list_add_tail(&wqe->qe, &mod->wait_q);
+	if (req)
+		list_add_tail(&wqe->qe, &mod->req_wait_q);
+	else
+		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
 }
 
 void
@@ -1102,7 +1132,8 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
 {
 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-	WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
+	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
+		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
 	list_del(&wqe->qe);
 }
 
@@ -1153,8 +1184,13 @@ bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
 	int	i;
 
 	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
-		bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
-		list_add_tail(qe, &mod->fcxp_unused_q);
+		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
+			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
+			list_add_tail(qe, &mod->fcxp_req_unused_q);
+		} else {
+			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
+			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
+		}
 	}
 }
 
@@ -1404,11 +1440,11 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
 
 	switch (event) {
 	case BFA_LPS_SM_FWRSP:
+	case BFA_LPS_SM_OFFLINE:
 		bfa_sm_set_state(lps, bfa_lps_sm_init);
 		bfa_lps_logout_comp(lps);
 		break;
 
-	case BFA_LPS_SM_OFFLINE:
 	case BFA_LPS_SM_DELETE:
 		bfa_sm_set_state(lps, bfa_lps_sm_init);
 		break;
@@ -1786,6 +1822,8 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
 
 	if (lps->fdisc)
 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+	else
+		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
 /*
@@ -4237,6 +4275,10 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
 		break;
 
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_rport_offline_cb(rp);
+		break;
+
 	default:
 		bfa_stats(rp, sm_off_unexp);
 		bfa_sm_fault(rp->bfa, event);
@@ -4353,6 +4395,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
 	case BFA_RPORT_SM_HWFAIL:
 		bfa_stats(rp, sm_offp_hwf);
 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_rport_offline_cb(rp);
 		break;
 
 	default:
@@ -4731,8 +4774,10 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
 	WARN_ON(speed == 0);
 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
 
-	rport->rport_info.speed = speed;
-	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+	if (rport) {
+		rport->rport_info.speed = speed;
+		bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+	}
 }
 
 /* Set Rport LUN Mask */
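Editor's note: the bfa_svc.c hunks above split the single FCXP exchange pool into request and response halves. claim_fcxps_mem() threads the first half of the backing array onto fcxp_req_free_q and the rest onto fcxp_rsp_free_q, and every allocator, wait queue, and free path is keyed by the new req flag, so a burst of response traffic can no longer starve outgoing requests of exchange resources (or vice versa). A compact sketch of the split using a plain singly-linked free list in place of the driver's queue macros:

/* One backing array, two free lists: first half for requests,
 * second half for responses. */
#include <stdio.h>

#define NUM_FCXP 8

struct fcxp {
	int tag;
	int is_req;
	struct fcxp *next;
};

static struct fcxp pool[NUM_FCXP];
static struct fcxp *req_free_q, *rsp_free_q;

static void claim_fcxps(void)
{
	for (int i = 0; i < NUM_FCXP; i++) {
		struct fcxp *f = &pool[i];

		f->tag = i;
		if (i < NUM_FCXP / 2) {		/* mirrors num_fcxps / 2 */
			f->is_req = 1;
			f->next = req_free_q;
			req_free_q = f;
		} else {
			f->is_req = 0;
			f->next = rsp_free_q;
			rsp_free_q = f;
		}
	}
}

static struct fcxp *fcxp_get(int req)
{
	struct fcxp **q = req ? &req_free_q : &rsp_free_q;
	struct fcxp *f = *q;

	if (f)
		*q = f->next;
	return f;	/* NULL when that half of the pool is exhausted */
}

int main(void)
{
	claim_fcxps();
	for (int i = 0; i < 5; i++) {
		struct fcxp *f = fcxp_get(1);
		printf("req alloc #%d -> %s\n", i,
		       f ? "ok" : "NULL (req half empty, rsp half untouched)");
	}
	return 0;
}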
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index f3006756463..1abcf7c5166 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -97,10 +97,13 @@ struct bfa_fcxp_mod_s {
 	struct bfa_s		*bfa;		/* backpointer to BFA */
 	struct bfa_fcxp_s	*fcxp_list;	/* array of FCXPs */
 	u16			num_fcxps;	/* max num FCXP requests */
-	struct list_head	fcxp_free_q;	/* free FCXPs */
-	struct list_head	fcxp_active_q;	/* active FCXPs */
-	struct list_head	wait_q;		/* wait queue for free fcxp */
-	struct list_head	fcxp_unused_q;	/* unused fcxps */
+	struct list_head	fcxp_req_free_q; /* free FCXPs used for sending req */
+	struct list_head	fcxp_rsp_free_q; /* free FCXPs used for sending rsp */
+	struct list_head	fcxp_active_q;	/* active FCXPs */
+	struct list_head	req_wait_q;	/* wait queue for free req_fcxp */
+	struct list_head	rsp_wait_q;	/* wait queue for free rsp_fcxp */
+	struct list_head	fcxp_req_unused_q;	/* unused req_fcxps */
+	struct list_head	fcxp_rsp_unused_q;	/* unused rsp_fcxps */
 	u32			req_pld_sz;
 	u32			rsp_pld_sz;
 	struct bfa_mem_dma_s	dma_seg[BFA_FCXP_DMA_SEGS];
@@ -197,6 +200,7 @@ struct bfa_fcxp_s {
 	struct bfa_cb_qe_s	hcb_qe;		/* comp: callback qelem */
 	struct bfa_reqq_wait_s	reqq_wqe;
 	bfa_boolean_t	reqq_waiting;
+	bfa_boolean_t	req_rsp;	/* Used to track req/rsp fcxp */
 };
 
 struct bfa_fcxp_wqe_s {
@@ -586,20 +590,22 @@ void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 /*
  * bfa fcxp API functions
  */
-struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
+struct bfa_fcxp_s *bfa_fcxp_req_rsp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
 				  int nreq_sgles, int nrsp_sgles,
 				  bfa_fcxp_get_sgaddr_t get_req_sga,
 				  bfa_fcxp_get_sglen_t get_req_sglen,
 				  bfa_fcxp_get_sgaddr_t get_rsp_sga,
-				  bfa_fcxp_get_sglen_t get_rsp_sglen);
-void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+				  bfa_fcxp_get_sglen_t get_rsp_sglen,
+				  bfa_boolean_t req);
+void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
 				bfa_fcxp_alloc_cbfn_t alloc_cbfn,
 				void *cbarg, void *bfad_fcxp,
 				int nreq_sgles, int nrsp_sgles,
 				bfa_fcxp_get_sgaddr_t get_req_sga,
 				bfa_fcxp_get_sglen_t get_req_sglen,
 				bfa_fcxp_get_sgaddr_t get_rsp_sga,
-				bfa_fcxp_get_sglen_t get_rsp_sglen);
+				bfa_fcxp_get_sglen_t get_rsp_sglen,
+				bfa_boolean_t req);
 void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
 			    struct bfa_fcxp_wqe_s *wqe);
 void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
@@ -658,6 +664,7 @@ u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
 u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
 u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2c8f0c71307..c37494916a1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -57,6 +57,7 @@ int pcie_max_read_reqsz;
 int		bfa_debugfs_enable = 1;
 int		msix_disable_cb = 0, msix_disable_ct = 0;
 int		max_xfer_size = BFAD_MAX_SECTORS >> 1;
+int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 
 /* Firmware releated */
 u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
@@ -148,6 +149,8 @@ MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
 module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
 		" Range[64k|128k|256k|512k|1024k|2048k]");
+module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
 
 static void
 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -736,6 +739,9 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 		}
 	}
 
+	/* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
+	pci_enable_pcie_error_reporting(pdev);
+
 	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 	bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
 
@@ -806,6 +812,8 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 		}
 	}
 
+	pci_save_state(pdev);
+
 	return 0;
 
 out_release_region:
@@ -822,6 +830,8 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
 	pci_iounmap(pdev, bfad->pci_bar0_kva);
 	pci_iounmap(pdev, bfad->pci_bar2_kva);
 	pci_release_regions(pdev);
+	/* Disable PCIE Advanced Error Recovery (AER) */
+	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -1258,6 +1268,16 @@ bfad_setup_intr(struct bfad_s *bfad)
 
 	error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
 	if (error) {
+		/* In CT1 & CT2, try to allocate just one vector */
+		if (bfa_asic_id_ctc(pdev->device)) {
+			printk(KERN_WARNING "bfa %s: trying one msix "
+			       "vector failed to allocate %d[%d]\n",
+			       bfad->pci_name, bfad->nvec, error);
+			bfad->nvec = 1;
+			error = pci_enable_msix(bfad->pcidev,
+						msix_entries, bfad->nvec);
+		}
+
 		/*
 		 * Only error number of vector is available.
 		 * We don't have a mechanism to map multiple
@@ -1267,12 +1287,13 @@ bfad_setup_intr(struct bfad_s *bfad)
 		 * vectors. Linux doesn't duplicate vectors
 		 * in the MSIX table for this case.
 		 */
-
-		printk(KERN_WARNING "bfad%d: "
-		       "pci_enable_msix failed (%d),"
-		       " use line based.\n", bfad->inst_no, error);
-
-		goto line_based;
+		if (error) {
+			printk(KERN_WARNING "bfad%d: "
+			       "pci_enable_msix failed (%d), "
+			       "use line based.\n",
+			       bfad->inst_no, error);
+			goto line_based;
+		}
 	}
 
 	/* Disable INTX in MSI-X mode */
@@ -1470,6 +1491,197 @@ bfad_pci_remove(struct pci_dev *pdev)
 	kfree(bfad);
 }
 
+/*
+ * PCI Error Recovery entry, error detected.
+ */
+static pci_ers_result_t
+bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+	unsigned long	flags;
+	pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
+
+	dev_printk(KERN_ERR, &pdev->dev,
+		   "error detected state: %d - flags: 0x%x\n",
+		   state, bfad->bfad_flags);
+
+	switch (state) {
+	case pci_channel_io_normal: /* non-fatal error */
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+		/* Suspend/fail all bfa operations */
+		bfa_ioc_suspend(&bfad->bfa.ioc);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		del_timer_sync(&bfad->hal_tmo);
+		ret = PCI_ERS_RESULT_CAN_RECOVER;
+		break;
+	case pci_channel_io_frozen: /* fatal error */
+		init_completion(&bfad->comp);
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_flags |= BFAD_EEH_BUSY;
+		/* Suspend/fail all bfa operations */
+		bfa_ioc_suspend(&bfad->bfa.ioc);
+		bfa_fcs_stop(&bfad->bfa_fcs);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		wait_for_completion(&bfad->comp);
+
+		bfad_remove_intr(bfad);
+		del_timer_sync(&bfad->hal_tmo);
+		pci_disable_device(pdev);
+		ret = PCI_ERS_RESULT_NEED_RESET;
+		break;
+	case pci_channel_io_perm_failure: /* PCI Card is DEAD */
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_flags |= BFAD_EEH_BUSY |
+				    BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+		/* If the error_detected handler is called with the reason
+		 * pci_channel_io_perm_failure - it will subsequently call
+		 * pci_remove() entry point to remove the pci device from the
+		 * system - So defer the cleanup to pci_remove(); cleaning up
+		 * here causes inconsistent state during pci_remove().
+		 */
+		ret = PCI_ERS_RESULT_DISCONNECT;
+		break;
+	default:
+		WARN_ON(1);
+	}
+
+	return ret;
+}
+
+int
+restart_bfa(struct bfad_s *bfad)
+{
+	unsigned long flags;
+	struct pci_dev *pdev = bfad->pcidev;
+
+	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
+		   &bfad->meminfo, &bfad->hal_pcidev);
+
+	/* Enable Interrupt and wait bfa_init completion */
+	if (bfad_setup_intr(bfad)) {
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "%s: bfad_setup_intr failed\n", bfad->pci_name);
+		bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
+		return -1;
+	}
+
+	init_completion(&bfad->comp);
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfa_iocfc_init(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	/* Set up interrupt handler for each vectors */
+	if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+	    bfad_install_msix_handler(bfad))
+		dev_printk(KERN_WARNING, &pdev->dev,
+			   "%s: install_msix failed.\n", bfad->pci_name);
+
+	bfad_init_timer(bfad);
+	wait_for_completion(&bfad->comp);
+	bfad_drv_start(bfad);
+
+	return 0;
+}
+
+/*
+ * PCI Error Recovery entry, re-initialize the chip.
+ */
+static pci_ers_result_t
+bfad_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+	u8 byte;
+
+	dev_printk(KERN_ERR, &pdev->dev,
+		   "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
+
+	if (pci_enable_device(pdev)) {
+		dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
+			   "PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_restore_state(pdev);
+
+	/*
+	 * Read some byte (e.g. DMA max. payload size which can't
+	 * be 0xff any time) to make sure - we did not hit another PCI error
+	 * in the middle of recovery. If we did, then declare permanent failure.
+	 */
+	pci_read_config_byte(pdev, 0x68, &byte);
+	if (byte == 0xff) {
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "slot_reset failed ... got another PCI error !\n");
+		goto out_disable_device;
+	}
+
+	pci_save_state(pdev);
+	pci_set_master(pdev);
+
+	if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
+		if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+			goto out_disable_device;
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	if (restart_bfa(bfad) == -1)
+		goto out_disable_device;
+
+	pci_enable_pcie_error_reporting(pdev);
+	dev_printk(KERN_WARNING, &pdev->dev,
+		   "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+out_disable_device:
+	pci_disable_device(pdev);
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static pci_ers_result_t
+bfad_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	unsigned long	flags;
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+	dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
+
+	/* Fetch FW diagnostic information */
+	bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
+
+	/* Cancel all pending IOs */
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfa_fcs_stop(&bfad->bfa_fcs);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
+	bfad_remove_intr(bfad);
+	del_timer_sync(&bfad->hal_tmo);
+	pci_disable_device(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static void
+bfad_pci_resume(struct pci_dev *pdev)
+{
+	unsigned long	flags;
+	struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+	dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
+
+	/* wait until the link is online */
+	bfad_rport_online_wait(bfad);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
 struct pci_device_id bfad_id_table[] = {
 	{
 		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
@@ -1513,11 +1725,22 @@ struct pci_device_id bfad_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, bfad_id_table);
 
+/*
+ * PCI error recovery handlers.
+ */
+static struct pci_error_handlers bfad_err_handler = {
+	.error_detected = bfad_pci_error_detected,
+	.slot_reset = bfad_pci_slot_reset,
+	.mmio_enabled = bfad_pci_mmio_enabled,
+	.resume = bfad_pci_resume,
+};
+
 static struct pci_driver bfad_pci_driver = {
 	.name = BFAD_DRIVER_NAME,
 	.id_table = bfad_id_table,
 	.probe = bfad_pci_probe,
 	.remove = __devexit_p(bfad_pci_remove),
+	.err_handler = &bfad_err_handler,
 };
 
 /*
@@ -1546,6 +1769,7 @@ bfad_init(void)
 
 	bfa_auto_recover = ioc_auto_recover;
 	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
+	bfa_fcs_rport_set_max_logins(max_rport_logins);
 
 	error = pci_register_driver(&bfad_pci_driver);
 	if (error) {
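Editor's note: the bfad.c changes above wire the driver into the kernel's PCI error-recovery (EEH/AER) flow: error_detected() quiesces the HBA, mmio_enabled() saves firmware trace data and cancels pending I/O, slot_reset() re-enables and re-initializes the device, and resume() clears the busy flag once rports are back online. A stripped-down skeleton of that plumbing is below; it shows the callback shapes only (kernel code of that era, not buildable standalone), with placeholder bodies where the real driver quiesces and restarts I/O.

/* Skeleton of PCI error-recovery wiring; bodies are placeholders. */
#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	/* Non-fatal: ask for mmio_enabled; frozen: ask for slot_reset. */
	return state == pci_channel_io_frozen ?
		PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	pci_set_master(pdev);
	/* re-initialize the HBA here */
	return PCI_ERS_RESULT_RECOVERED;
}

static void my_resume(struct pci_dev *pdev)
{
	/* resume normal I/O */
}

static struct pci_error_handlers my_err_handler = {
	.error_detected = my_error_detected,
	.slot_reset	= my_slot_reset,
	.resume		= my_resume,
};

The structure is then hooked into the driver's struct pci_driver via .err_handler, exactly as the hunk above does for bfad_pci_driver.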
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index b8392744017..72f5dc32cc1 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -587,6 +587,37 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
 	return 0;
 }
 
+void
+bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
+{
+	struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
+	struct bfad_im_port_s *im_port =
+			(struct bfad_im_port_s *)vport->drv_port.im_port;
+	struct bfad_s *bfad = im_port->bfad;
+	struct Scsi_Host *vshost = vport->drv_port.im_port->shost;
+	char *sym_name = fc_vport->symbolic_name;
+	struct bfa_fcs_vport_s *fcs_vport;
+	wwn_t	pwwn;
+	unsigned long flags;
+
+	u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+	if (fcs_vport == NULL)
+		return;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (strlen(sym_name) > 0) {
+		strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name);
+		bfa_fcs_lport_ns_util_send_rspn_id(
+			BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL);
+	}
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
 struct fc_function_template bfad_im_fc_function_template = {
 
 	/* Target dynamic attributes */
@@ -640,6 +671,7 @@ struct fc_function_template bfad_im_fc_function_template = {
 	.vport_create = bfad_im_vport_create,
 	.vport_delete = bfad_im_vport_delete,
 	.vport_disable = bfad_im_vport_disable,
+	.set_vport_symbolic_name = bfad_im_vport_set_symbolic_name,
 	.bsg_request = bfad_im_bsg_request,
 	.bsg_timeout = bfad_im_bsg_timeout,
 };
@@ -792,6 +824,13 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
 	else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 			"Brocade 16Gbps PCIe dual port FC HBA");
+	} else if (!strcmp(model, "Brocade-1867")) {
+		if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"Brocade 16Gbps PCIe single port FC HBA for IBM");
+		else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+			snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+				"Brocade 16Gbps PCIe dual port FC HBA for IBM");
 	} else
 		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
 			"Invalid Model");
@@ -909,15 +948,16 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
 	struct bfad_port_s    *port = im_port->port;
 	struct bfad_s         *bfad = im_port->bfad;
 	int        nrports = 2048;
-	wwn_t	   *rports = NULL;
+	struct bfa_rport_qualifier_s *rports = NULL;
 	unsigned long        flags;
 
-	rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
+	rports = kzalloc(sizeof(struct bfa_rport_qualifier_s) * nrports,
+			 GFP_ATOMIC);
 	if (rports == NULL)
 		return snprintf(buf, PAGE_SIZE, "Failed\n");
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
+	bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	kfree(rports);
 
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 9c1495b321d..0afa39076ce 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -535,7 +535,8 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
 
 	if (bfad_chk_iocmd_sz(payload_len,
 		sizeof(struct bfa_bsg_lport_get_rports_s),
-		sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+		sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
+		!= BFA_STATUS_OK) {
 		iocmd->status = BFA_STATUS_VERSION_FAIL;
 		return 0;
 	}
@@ -552,8 +553,9 @@ bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
 		goto out;
 	}
 
-	bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
-			&iocmd->nrports);
+	bfa_fcs_lport_get_rport_quals(fcs_port,
+			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
+			&iocmd->nrports);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	iocmd->status = BFA_STATUS_OK;
 out:
@@ -578,7 +580,11 @@ bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
 		goto out;
 	}
 
-	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+	if (iocmd->pid)
+		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
+						iocmd->rpwwn, iocmd->pid);
+	else
+		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
 	if (fcs_rport == NULL) {
 		bfa_trc(bfad, 0);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -671,9 +677,11 @@ bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
 
 	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
 		sizeof(struct bfa_rport_stats_s));
-	memcpy((void *)&iocmd->stats.hal_stats,
-	       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
-	       sizeof(struct bfa_rport_hal_stats_s));
+	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
+		memcpy((void *)&iocmd->stats.hal_stats,
+		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+		       sizeof(struct bfa_rport_hal_stats_s));
+	}
 
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	iocmd->status = BFA_STATUS_OK;
@@ -709,7 +717,8 @@ bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
 
 	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
 	rport = bfa_fcs_rport_get_halrport(fcs_rport);
-	memset(&rport->stats, 0, sizeof(rport->stats));
+	if (rport)
+		memset(&rport->stats, 0, sizeof(rport->stats));
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	iocmd->status = BFA_STATUS_OK;
 out:
@@ -744,7 +753,8 @@ bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
 	fcs_rport->rpf.assigned_speed = iocmd->speed;
 	/* Set this speed in f/w only if the RPSC speed is not available */
 	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
-		bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+		if (fcs_rport->bfa_rport)
+			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	iocmd->status = BFA_STATUS_OK;
 out:
@@ -1030,9 +1040,10 @@ bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
 	else {
 		iocmd->status = BFA_STATUS_OK;
-		memcpy((void *)&iocmd->iostats, (void *)
-		       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
-		       sizeof(struct bfa_itnim_iostats_s));
+		if (bfa_fcs_itnim_get_halitn(itnim))
+			memcpy((void *)&iocmd->iostats, (void *)
+			       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+			       sizeof(struct bfa_itnim_iostats_s));
 		}
 	}
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2949,13 +2960,13 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 
 	/* Allocate bfa_fcxp structure */
-	hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
 				  drv_fcxp->num_req_sgles,
 				  drv_fcxp->num_rsp_sgles,
 				  bfad_fcxp_get_req_sgaddr_cb,
 				  bfad_fcxp_get_req_sglen_cb,
 				  bfad_fcxp_get_rsp_sgaddr_cb,
-				  bfad_fcxp_get_rsp_sglen_cb);
+				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
 	if (!hal_fcxp) {
 		bfa_trc(bfad, 0);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
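Editor's note: several bfad_bsg.c hunks above move rport lookups from WWN-only to a (rpwwn, pid) qualifier, presumably because the same remote-port WWN can transiently be known under more than one FC address (for instance across an address change), so the PID is needed to pick the right instance. A small sketch of the two-step lookup with hypothetical types and data:

/* Prefer an exact (wwn, pid) match when the caller supplies a PID,
 * fall back to WWN-only otherwise. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct rport {
	uint64_t wwn;
	uint32_t pid;
};

static struct rport rports[] = {
	{ 0x5000000000000001ULL, 0x010200 },
	{ 0x5000000000000001ULL, 0x010300 },	/* same WWN, different PID */
};

static struct rport *rport_lookup(uint64_t wwn, uint32_t pid)
{
	for (size_t i = 0; i < sizeof(rports) / sizeof(rports[0]); i++) {
		if (rports[i].wwn != wwn)
			continue;
		if (!pid || rports[i].pid == pid)
			return &rports[i];
	}
	return NULL;
}

int main(void)
{
	struct rport *r = rport_lookup(0x5000000000000001ULL, 0x010300);

	printf("pid-qualified lookup -> pid 0x%06x\n", r ? r->pid : 0);
	return 0;
}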
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index 17ad6728313..8c569ddb750 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -319,6 +319,8 @@ struct bfa_bsg_rport_attr_s {
 	u16		vf_id;
 	wwn_t		pwwn;
 	wwn_t		rpwwn;
+	u32		pid;
+	u32		rsvd;
 	struct bfa_rport_attr_s attr;
 };
 
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 7f74f1d1912..1840651ce1d 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -37,6 +37,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
+#include <linux/aer.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
@@ -56,7 +57,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.0.23.0"
+#define BFAD_DRIVER_VERSION    "3.1.2.0"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
@@ -81,6 +82,8 @@
 #define BFAD_FC4_PROBE_DONE			0x00000200
 #define BFAD_PORT_DELETE			0x00000001
 #define BFAD_INTX_ON				0x00000400
+#define BFAD_EEH_BUSY				0x00000800
+#define BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE	0x00001000
 /*
  * BFAD related definition
  */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 2eebf8d4d58..8f92732655c 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -1216,6 +1216,15 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
 		return 0;
 	}
 
+	if (bfad->bfad_flags & BFAD_EEH_BUSY) {
+		if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
+			cmnd->result = DID_NO_CONNECT << 16;
+		else
+			cmnd->result = DID_REQUEUE << 16;
+		done(cmnd);
+		return 0;
+	}
+
 	sg_cnt = scsi_dma_map(cmnd);
 	if (sg_cnt < 0)
 		return SCSI_MLQUEUE_HOST_BUSY;
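Editor's note: the queuecommand hunk above fails commands early while EEH recovery is in progress: DID_REQUEUE tells the SCSI midlayer to retry the I/O later, while DID_NO_CONNECT ends it outright once the slot has been declared permanently dead. A sketch of that decision (the DID_* values match the kernel's; the rest is illustrative):

/* Gate incoming commands while PCI error recovery is under way. */
#include <stdio.h>

#define DID_NO_CONNECT	0x01
#define DID_REQUEUE	0x0d

enum eeh_state { EEH_NONE, EEH_BUSY, EEH_PERM_FAILURE };

static int gate_command(enum eeh_state s, int *result)
{
	if (s == EEH_NONE)
		return 0;		/* queue normally */
	*result = (s == EEH_PERM_FAILURE ? DID_NO_CONNECT : DID_REQUEUE) << 16;
	return 1;			/* completed without queuing */
}

int main(void)
{
	int result = 0;

	if (gate_command(EEH_BUSY, &result))
		printf("completed early, result=0x%x (requeue)\n", result);
	return 0;
}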
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 73f231ccd45..8d4626c07a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1807,7 +1807,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
 	}
 
-	memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
+	memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 	if (fcp_sns_len)
 		memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
 
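Editor's note: the one-line bnx2fc fix above replaces sizeof(sc_cmd->sense_buffer) with SCSI_SENSE_BUFFERSIZE. Since sense_buffer is a pointer, sizeof yields the pointer size (4 or 8 bytes), so only the first few bytes of the 96-byte sense buffer were actually being cleared. A self-contained demonstration of the pitfall:

/* sizeof() on a pointer field clears only pointer-sized bytes. */
#include <stdio.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96

struct cmd {
	unsigned char *sense_buffer;	/* a pointer, like scsi_cmnd's */
};

int main(void)
{
	unsigned char buf[SENSE_BUFFERSIZE];
	struct cmd c = { buf };

	memset(buf, 0xaa, sizeof(buf));			/* stale data */
	memset(c.sense_buffer, 0, sizeof(c.sense_buffer));	/* the bug */
	printf("sizeof(c.sense_buffer) = %zu, buf[8] = 0x%02x (stale)\n",
	       sizeof(c.sense_buffer), buf[8]);

	memset(c.sense_buffer, 0, SENSE_BUFFERSIZE);		/* the fix */
	printf("after fix, buf[8] = 0x%02x\n", buf[8]);
	return 0;
}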
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 450e011f981..76e4c039f0d 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1422,7 +1422,8 @@ static const char * const hostbyte_table[]={
1422"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", 1422"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
1423"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", 1423"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
1424"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE", 1424"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
1425"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" }; 1425"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
1426"DID_NEXUS_FAILURE" };
1426#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table) 1427#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)
1427 1428
1428static const char * const driverbyte_table[]={ 1429static const char * const driverbyte_table[]={
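Editor's note: the constants.c hunk extends hostbyte_table[] so it stays in step with the DID_* host codes (DID_TARGET_FAILURE and DID_NEXUS_FAILURE already exist as codes but had no printable name). Tables indexed by an enum drift easily; one way to catch that at compile time is designated initializers plus a static assertion, sketched below in an illustrative style rather than the kernel's:

/* Keep a name table in sync with its enum at compile time. */
#include <stdio.h>

enum host_byte {
	MY_DID_OK,
	MY_DID_NO_CONNECT,
	MY_DID_TARGET_FAILURE,
	MY_DID_NEXUS_FAILURE,
	MY_DID_MAX
};

static const char *const hostbyte_name[] = {
	[MY_DID_OK]		= "DID_OK",
	[MY_DID_NO_CONNECT]	= "DID_NO_CONNECT",
	[MY_DID_TARGET_FAILURE]	= "DID_TARGET_FAILURE",
	[MY_DID_NEXUS_FAILURE]	= "DID_NEXUS_FAILURE",
};

/* Fails to compile if an enumerator is added without a name. */
_Static_assert(sizeof(hostbyte_name) / sizeof(hostbyte_name[0]) == MY_DID_MAX,
	       "hostbyte_name out of sync with enum host_byte");

int main(void)
{
	printf("%s\n", hostbyte_name[MY_DID_NEXUS_FAILURE]);
	return 0;
}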
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 08d80a6d272..6f4d8e6f32f 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -641,8 +641,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 		h->state = TPGS_STATE_STANDBY;
 		break;
 	case TPGS_STATE_OFFLINE:
-	case TPGS_STATE_UNAVAILABLE:
-		/* Path unusable for unavailable/offline */
+		/* Path unusable */
 		err = SCSI_DH_DEV_OFFLINED;
 		break;
 	default:
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 20c4557f5ab..69c915aa77c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -790,29 +790,19 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
790 {"IBM", "1815"}, 790 {"IBM", "1815"},
791 {"IBM", "1818"}, 791 {"IBM", "1818"},
792 {"IBM", "3526"}, 792 {"IBM", "3526"},
793 {"SGI", "TP9400"}, 793 {"SGI", "TP9"},
794 {"SGI", "TP9500"},
795 {"SGI", "TP9700"},
796 {"SGI", "IS"}, 794 {"SGI", "IS"},
797 {"STK", "OPENstorage D280"}, 795 {"STK", "OPENstorage D280"},
798 {"SUN", "CSM200_R"},
799 {"SUN", "LCSM100_I"},
800 {"SUN", "LCSM100_S"},
801 {"SUN", "LCSM100_E"},
802 {"SUN", "LCSM100_F"},
803 {"DELL", "MD3000"},
804 {"DELL", "MD3000i"},
805 {"DELL", "MD32xx"},
806 {"DELL", "MD32xxi"},
807 {"DELL", "MD36xxi"},
808 {"DELL", "MD36xxf"},
809 {"LSI", "INF-01-00"},
810 {"ENGENIO", "INF-01-00"},
811 {"STK", "FLEXLINE 380"}, 796 {"STK", "FLEXLINE 380"},
812 {"SUN", "CSM100_R_FC"}, 797 {"SUN", "CSM"},
798 {"SUN", "LCSM100"},
813 {"SUN", "STK6580_6780"}, 799 {"SUN", "STK6580_6780"},
814 {"SUN", "SUN_6180"}, 800 {"SUN", "SUN_6180"},
815 {"SUN", "ArrayStorage"}, 801 {"SUN", "ArrayStorage"},
802 {"DELL", "MD3"},
803 {"NETAPP", "INF-01-00"},
804 {"LSI", "INF-01-00"},
805 {"ENGENIO", "INF-01-00"},
816 {NULL, NULL}, 806 {NULL, NULL},
817}; 807};
818 808
@@ -863,7 +853,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
 	if (!scsi_dh_data) {
 		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
 			    RDAC_NAME);
-		return 0;
+		return -ENOMEM;
 	}
 
 	scsi_dh_data->scsi_dh = &rdac_dh;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 2b4261cb774..4217e49aea4 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -99,6 +99,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334d},
 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
@@ -118,13 +127,22 @@ static struct board_type products[] = {
118 {0x3249103C, "Smart Array P812", &SA5_access}, 127 {0x3249103C, "Smart Array P812", &SA5_access},
119 {0x324a103C, "Smart Array P712m", &SA5_access}, 128 {0x324a103C, "Smart Array P712m", &SA5_access},
120 {0x324b103C, "Smart Array P711m", &SA5_access}, 129 {0x324b103C, "Smart Array P711m", &SA5_access},
121 {0x3350103C, "Smart Array", &SA5_access}, 130 {0x3350103C, "Smart Array P222", &SA5_access},
122 {0x3351103C, "Smart Array", &SA5_access}, 131 {0x3351103C, "Smart Array P420", &SA5_access},
123 {0x3352103C, "Smart Array", &SA5_access}, 132 {0x3352103C, "Smart Array P421", &SA5_access},
124 {0x3353103C, "Smart Array", &SA5_access}, 133 {0x3353103C, "Smart Array P822", &SA5_access},
125 {0x3354103C, "Smart Array", &SA5_access}, 134 {0x3354103C, "Smart Array P420i", &SA5_access},
126 {0x3355103C, "Smart Array", &SA5_access}, 135 {0x3355103C, "Smart Array P220i", &SA5_access},
127 {0x3356103C, "Smart Array", &SA5_access}, 136 {0x3356103C, "Smart Array P721m", &SA5_access},
137 {0x1920103C, "Smart Array", &SA5_access},
138 {0x1921103C, "Smart Array", &SA5_access},
139 {0x1922103C, "Smart Array", &SA5_access},
140 {0x1923103C, "Smart Array", &SA5_access},
141 {0x1924103C, "Smart Array", &SA5_access},
142 {0x1925103C, "Smart Array", &SA5_access},
143 {0x1926103C, "Smart Array", &SA5_access},
144 {0x1928103C, "Smart Array", &SA5_access},
145 {0x334d103C, "Smart Array P822se", &SA5_access},
128 {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 146 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
129}; 147};
130 148
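The widened products[] table is consumed by matching the controller's board ID against each row; a hedged sketch of such a lookup (the row layout with board_id/product_name/access fields is inferred from the hunk, and the helper name is hypothetical):

    /* Hypothetical linear scan over products[]; the final
     * 0xFFFF103C row acts as the "Unknown Smart Array" catch-all.
     */
    static int example_lookup_board(u32 board_id)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(products) - 1; i++)
                    if (products[i].board_id == board_id)
                            return i;
            return i;   /* fall through to the catch-all entry */
    }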
@@ -2610,7 +2628,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
2610 /* not in reqQ, if also not in cmpQ, must have already completed */ 2628 /* not in reqQ, if also not in cmpQ, must have already completed */
2611 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); 2629 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
2612 if (!found) { 2630 if (!found) {
2613 dev_dbg(&h->pdev->dev, "%s Request FAILED (not known to driver).\n", 2631 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
2614 msg); 2632 msg);
2615 return SUCCESS; 2633 return SUCCESS;
2616 } 2634 }
@@ -3266,7 +3284,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
3266 c->Request.Timeout = 0; /* Don't time out */ 3284 c->Request.Timeout = 0; /* Don't time out */
3267 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 3285 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
3268 c->Request.CDB[0] = cmd; 3286 c->Request.CDB[0] = cmd;
3269 c->Request.CDB[1] = 0x03; /* Reset target above */ 3287 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
3270 /* If bytes 4-7 are zero, it means reset the */ 3288 /* If bytes 4-7 are zero, it means reset the */
3271 /* LunID device */ 3289 /* LunID device */
3272 c->Request.CDB[4] = 0x00; 3290 c->Request.CDB[4] = 0x00;
@@ -3338,7 +3356,8 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
3338{ 3356{
3339 ulong page_base = ((ulong) base) & PAGE_MASK; 3357 ulong page_base = ((ulong) base) & PAGE_MASK;
3340 ulong page_offs = ((ulong) base) - page_base; 3358 ulong page_offs = ((ulong) base) - page_base;
3341 void __iomem *page_remapped = ioremap(page_base, page_offs + size); 3359 void __iomem *page_remapped = ioremap_nocache(page_base,
3360 page_offs + size);
3342 3361
3343 return page_remapped ? (page_remapped + page_offs) : NULL; 3362 return page_remapped ? (page_remapped + page_offs) : NULL;
3344} 3363}
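Because ioremap_nocache() maps whole pages, remap_pci_mem() aligns the base down to a page boundary and adds the intra-page offset back onto the returned cookie. A worked example, assuming the usual 4 KiB PAGE_SIZE:

    /* Mapping 0x100 bytes of registers at bus address 0xfebf1c40:
     *
     *   page_base = 0xfebf1c40 & PAGE_MASK   = 0xfebf1000
     *   page_offs = 0xfebf1c40 - 0xfebf1000  = 0xc40
     *   ioremap_nocache(0xfebf1000, 0xc40 + 0x100)
     *
     * page_remapped + page_offs then points at the first register.
     */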
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index ff5b5c5538e..cb150d1e585 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,7 +1,3 @@
1obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsic.o 1obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o
2
3ibmvscsic-y += ibmvscsi.o
4ibmvscsic-$(CONFIG_PPC_PSERIES) += rpa_vscsi.o
5
6obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o 2obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
7obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o 3obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 134a0ae85bb..5e8d51bd03d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2242,6 +2242,21 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2242} 2242}
2243 2243
2244/** 2244/**
2245 * ibmvfc_match_evt - Match function for specified event
2246 * @evt: ibmvfc event struct
2247 * @match: event to match
2248 *
2249 * Returns:
2250 * 1 if the event matches / 0 if it does not
2251 **/
2252static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2253{
2254 if (evt == match)
2255 return 1;
2256 return 0;
2257}
2258
2259/**
2245 * ibmvfc_abort_task_set - Abort outstanding commands to the device 2260 * ibmvfc_abort_task_set - Abort outstanding commands to the device
2246 * @sdev: scsi device to abort commands 2261 * @sdev: scsi device to abort commands
2247 * 2262 *
@@ -2322,7 +2337,20 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2322 if (rc) { 2337 if (rc) {
2323 sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n"); 2338 sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2324 ibmvfc_reset_host(vhost); 2339 ibmvfc_reset_host(vhost);
2325 rsp_rc = 0; 2340 rsp_rc = -EIO;
2341 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2342
2343 if (rc == SUCCESS)
2344 rsp_rc = 0;
2345
2346 rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2347 if (rc != SUCCESS) {
2348 spin_lock_irqsave(vhost->host->host_lock, flags);
2349 ibmvfc_hard_reset_host(vhost);
2350 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2351 rsp_rc = 0;
2352 }
2353
2326 goto out; 2354 goto out;
2327 } 2355 }
2328 } 2356 }
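ibmvfc_wait_for_ops() selects outstanding events through a match callback, and the new ibmvfc_match_evt lets the cancel-failure path above wait for one specific event before deciding whether a hard reset is needed. A sketch of the callback contract (signature taken from the hunk, waiter call simplified):

    /* A match callback gets each outstanding event plus an opaque
     * key and returns 1 to claim it; matching a single event is an
     * identity compare, exactly as in ibmvfc_match_evt above.
     */
    static int sketch_match_evt(struct ibmvfc_event *evt, void *match)
    {
            return evt == match;
    }

    /* simplified use, mirroring the abort path:
     *   if (ibmvfc_wait_for_ops(vhost, evt, sketch_match_evt) != SUCCESS)
     *           escalate to ibmvfc_hard_reset_host(vhost);
     */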
@@ -2597,8 +2625,10 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2597 case IBMVFC_AE_SCN_FABRIC: 2625 case IBMVFC_AE_SCN_FABRIC:
2598 case IBMVFC_AE_SCN_DOMAIN: 2626 case IBMVFC_AE_SCN_DOMAIN:
2599 vhost->events_to_log |= IBMVFC_AE_RSCN; 2627 vhost->events_to_log |= IBMVFC_AE_RSCN;
2600 vhost->delay_init = 1; 2628 if (vhost->state < IBMVFC_HALTED) {
2601 __ibmvfc_reset_host(vhost); 2629 vhost->delay_init = 1;
2630 __ibmvfc_reset_host(vhost);
2631 }
2602 break; 2632 break;
2603 case IBMVFC_AE_SCN_NPORT: 2633 case IBMVFC_AE_SCN_NPORT:
2604 case IBMVFC_AE_SCN_GROUP: 2634 case IBMVFC_AE_SCN_GROUP:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 834c37fc7ce..3be8af624e6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.9" 32#define IBMVFC_DRIVER_VERSION "1.0.10"
33#define IBMVFC_DRIVER_DATE "(August 5, 2010)" 33#define IBMVFC_DRIVER_DATE "(August 24, 2012)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 60 35#define IBMVFC_DEFAULT_TIMEOUT 60
36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45 36#define IBMVFC_ADISC_CANCEL_TIMEOUT 45
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 3a6c4742951..ef9a54c7da6 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,13 +93,13 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
93static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; 93static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
94static int fast_fail = 1; 94static int fast_fail = 1;
95static int client_reserve = 1; 95static int client_reserve = 1;
96static char partition_name[97] = "UNKNOWN";
97static unsigned int partition_number = -1;
96 98
97static struct scsi_transport_template *ibmvscsi_transport_template; 99static struct scsi_transport_template *ibmvscsi_transport_template;
98 100
99#define IBMVSCSI_VERSION "1.5.9" 101#define IBMVSCSI_VERSION "1.5.9"
100 102
101static struct ibmvscsi_ops *ibmvscsi_ops;
102
103MODULE_DESCRIPTION("IBM Virtual SCSI"); 103MODULE_DESCRIPTION("IBM Virtual SCSI");
104MODULE_AUTHOR("Dave Boutcher"); 104MODULE_AUTHOR("Dave Boutcher");
105MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
@@ -118,6 +118,316 @@ MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
118module_param_named(client_reserve, client_reserve, int, S_IRUGO ); 118module_param_named(client_reserve, client_reserve, int, S_IRUGO );
119MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); 119MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
120 120
121static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
122 struct ibmvscsi_host_data *hostdata);
123
124/* ------------------------------------------------------------
125 * Routines for managing the command/response queue
126 */
127/**
128 * ibmvscsi_handle_event: - Interrupt handler for crq events
129 * @irq: number of irq to handle, not used
130 * @dev_instance: ibmvscsi_host_data of host that received interrupt
131 *
132 * Disables interrupts and schedules srp_task
133 * Always returns IRQ_HANDLED
134 */
135static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
136{
137 struct ibmvscsi_host_data *hostdata =
138 (struct ibmvscsi_host_data *)dev_instance;
139 vio_disable_interrupts(to_vio_dev(hostdata->dev));
140 tasklet_schedule(&hostdata->srp_task);
141 return IRQ_HANDLED;
142}
143
144/**
145 * release_crq_queue: - Deallocates data and unregisters CRQ
146 * @queue: crq_queue to deallocate and unregister
147 * @hostdata: ibmvscsi_host_data of host
148 *
149 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
150 * the crq with the hypervisor.
151 */
152static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
153 struct ibmvscsi_host_data *hostdata,
154 int max_requests)
155{
156 long rc = 0;
157 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
158 free_irq(vdev->irq, (void *)hostdata);
159 tasklet_kill(&hostdata->srp_task);
160 do {
161 if (rc)
162 msleep(100);
163 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
164 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
165 dma_unmap_single(hostdata->dev,
166 queue->msg_token,
167 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
168 free_page((unsigned long)queue->msgs);
169}
170
171/**
172 * crq_queue_next_crq: - Returns the next entry in message queue
173 * @queue: crq_queue to use
174 *
175 * Returns pointer to next entry in queue, or NULL if there are no new
176 * entries in the CRQ.
177 */
178static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
179{
180 struct viosrp_crq *crq;
181 unsigned long flags;
182
183 spin_lock_irqsave(&queue->lock, flags);
184 crq = &queue->msgs[queue->cur];
185 if (crq->valid & 0x80) {
186 if (++queue->cur == queue->size)
187 queue->cur = 0;
188 } else
189 crq = NULL;
190 spin_unlock_irqrestore(&queue->lock, flags);
191
192 return crq;
193}
194
195/**
196 * ibmvscsi_send_crq: - Send a CRQ
197 * @hostdata: the adapter
198 * @word1: the first 64 bits of the data
199 * @word2: the second 64 bits of the data
200 */
201static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
202 u64 word1, u64 word2)
203{
204 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
205
206 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
207}
208
209/**
210 * ibmvscsi_task: - Process srps asynchronously
211 * @data: ibmvscsi_host_data of host
212 */
213static void ibmvscsi_task(void *data)
214{
215 struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
216 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
217 struct viosrp_crq *crq;
218 int done = 0;
219
220 while (!done) {
221 /* Pull all the valid messages off the CRQ */
222 while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
223 ibmvscsi_handle_crq(crq, hostdata);
224 crq->valid = 0x00;
225 }
226
227 vio_enable_interrupts(vdev);
228 crq = crq_queue_next_crq(&hostdata->queue);
229 if (crq != NULL) {
230 vio_disable_interrupts(vdev);
231 ibmvscsi_handle_crq(crq, hostdata);
232 crq->valid = 0x00;
233 } else {
234 done = 1;
235 }
236 }
237}
238
239static void gather_partition_info(void)
240{
241 struct device_node *rootdn;
242
243 const char *ppartition_name;
244 const unsigned int *p_number_ptr;
245
246 /* Retrieve information about this partition */
247 rootdn = of_find_node_by_path("/");
248 if (!rootdn) {
249 return;
250 }
251
252 ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
253 if (ppartition_name)
254 strncpy(partition_name, ppartition_name,
255 sizeof(partition_name));
256 p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
257 if (p_number_ptr)
258 partition_number = *p_number_ptr;
259 of_node_put(rootdn);
260}
261
262static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
263{
264 memset(&hostdata->madapter_info, 0x00,
265 sizeof(hostdata->madapter_info));
266
267 dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
268 strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
269
270 strncpy(hostdata->madapter_info.partition_name, partition_name,
271 sizeof(hostdata->madapter_info.partition_name));
272
273 hostdata->madapter_info.partition_number = partition_number;
274
275 hostdata->madapter_info.mad_version = 1;
276 hostdata->madapter_info.os_type = 2;
277}
278
279/**
280 * reset_crq_queue: - resets a crq after a failure
281 * @queue: crq_queue to reset and re-register
282 * @hostdata: ibmvscsi_host_data of host
283 *
284 */
285static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
286 struct ibmvscsi_host_data *hostdata)
287{
288 int rc = 0;
289 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
290
291 /* Close the CRQ */
292 do {
293 if (rc)
294 msleep(100);
295 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
296 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
297
298 /* Clean out the queue */
299 memset(queue->msgs, 0x00, PAGE_SIZE);
300 queue->cur = 0;
301
302 set_adapter_info(hostdata);
303
304 /* And re-open it again */
305 rc = plpar_hcall_norets(H_REG_CRQ,
306 vdev->unit_address,
307 queue->msg_token, PAGE_SIZE);
308 if (rc == 2) {
309 /* Adapter is good, but other end is not ready */
310 dev_warn(hostdata->dev, "Partner adapter not ready\n");
311 } else if (rc != 0) {
312 dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
313 }
314 return rc;
315}
316
317/**
318 * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
319 * @queue: crq_queue to initialize and register
320 * @hostdata: ibmvscsi_host_data of host
321 *
322 * Allocates a page for messages, maps it for dma, and registers
323 * the crq with the hypervisor.
324 * Returns zero on success.
325 */
326static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
327 struct ibmvscsi_host_data *hostdata,
328 int max_requests)
329{
330 int rc;
331 int retrc;
332 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
333
334 queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
335
336 if (!queue->msgs)
337 goto malloc_failed;
338 queue->size = PAGE_SIZE / sizeof(*queue->msgs);
339
340 queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
341 queue->size * sizeof(*queue->msgs),
342 DMA_BIDIRECTIONAL);
343
344 if (dma_mapping_error(hostdata->dev, queue->msg_token))
345 goto map_failed;
346
347 gather_partition_info();
348 set_adapter_info(hostdata);
349
350 retrc = rc = plpar_hcall_norets(H_REG_CRQ,
351 vdev->unit_address,
352 queue->msg_token, PAGE_SIZE);
353 if (rc == H_RESOURCE)
354 /* maybe kexecing and resource is busy. try a reset */
355 rc = ibmvscsi_reset_crq_queue(queue,
356 hostdata);
357
358 if (rc == 2) {
359 /* Adapter is good, but other end is not ready */
360 dev_warn(hostdata->dev, "Partner adapter not ready\n");
361 retrc = 0;
362 } else if (rc != 0) {
363 dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
364 goto reg_crq_failed;
365 }
366
367 queue->cur = 0;
368 spin_lock_init(&queue->lock);
369
370 tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
371 (unsigned long)hostdata);
372
373 if (request_irq(vdev->irq,
374 ibmvscsi_handle_event,
375 0, "ibmvscsi", (void *)hostdata) != 0) {
376 dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
377 vdev->irq);
378 goto req_irq_failed;
379 }
380
381 rc = vio_enable_interrupts(vdev);
382 if (rc != 0) {
383 dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
384 goto req_irq_failed;
385 }
386
387 return retrc;
388
389 req_irq_failed:
390 tasklet_kill(&hostdata->srp_task);
391 rc = 0;
392 do {
393 if (rc)
394 msleep(100);
395 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
396 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
397 reg_crq_failed:
398 dma_unmap_single(hostdata->dev,
399 queue->msg_token,
400 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
401 map_failed:
402 free_page((unsigned long)queue->msgs);
403 malloc_failed:
404 return -1;
405}
406
407/**
408 * reenable_crq_queue: - reenables a crq after it has been disabled
409 * @queue: crq_queue to re-enable
410 * @hostdata: ibmvscsi_host_data of host
411 *
412 */
413static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
414 struct ibmvscsi_host_data *hostdata)
415{
416 int rc = 0;
417 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
418
419 /* Re-enable the CRQ */
420 do {
421 if (rc)
422 msleep(100);
423 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
424 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
425
426 if (rc)
427 dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
428 return rc;
429}
430
121/* ------------------------------------------------------------ 431/* ------------------------------------------------------------
122 * Routines for the event pool and event structs 432 * Routines for the event pool and event structs
123 */ 433 */
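The tasklet body above uses the standard lost-interrupt guard: drain the CRQ, re-arm interrupts, then poll once more, because a message arriving between the last drain and vio_enable_interrupts() would otherwise sit unnoticed until the next interrupt. Condensed to its skeleton (handle() stands in for ibmvscsi_handle_crq() plus clearing crq->valid):

    for (;;) {
            while ((crq = crq_queue_next_crq(&hostdata->queue)))
                    handle(crq);                  /* drain everything queued */
            vio_enable_interrupts(vdev);          /* re-arm the interrupt */
            crq = crq_queue_next_crq(&hostdata->queue);
            if (!crq)
                    break;                        /* no race; safe to stop */
            vio_disable_interrupts(vdev);         /* raced; go around again */
            handle(crq);
    }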
@@ -611,7 +921,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
611 } 921 }
612 922
613 if ((rc = 923 if ((rc =
614 ibmvscsi_ops->send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { 924 ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
615 list_del(&evt_struct->list); 925 list_del(&evt_struct->list);
616 del_timer(&evt_struct->timer); 926 del_timer(&evt_struct->timer);
617 927
@@ -1420,8 +1730,8 @@ static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
1420 * @hostdata: ibmvscsi_host_data of host 1730 * @hostdata: ibmvscsi_host_data of host
1421 * 1731 *
1422*/ 1732*/
1423void ibmvscsi_handle_crq(struct viosrp_crq *crq, 1733static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1424 struct ibmvscsi_host_data *hostdata) 1734 struct ibmvscsi_host_data *hostdata)
1425{ 1735{
1426 long rc; 1736 long rc;
1427 unsigned long flags; 1737 unsigned long flags;
@@ -1433,8 +1743,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1433 case 0x01: /* Initialization message */ 1743 case 0x01: /* Initialization message */
1434 dev_info(hostdata->dev, "partner initialized\n"); 1744 dev_info(hostdata->dev, "partner initialized\n");
1435 /* Send back a response */ 1745 /* Send back a response */
1436 if ((rc = ibmvscsi_ops->send_crq(hostdata, 1746 rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
1437 0xC002000000000000LL, 0)) == 0) { 1747 if (rc == 0) {
1438 /* Now login */ 1748 /* Now login */
1439 init_adapter(hostdata); 1749 init_adapter(hostdata);
1440 } else { 1750 } else {
@@ -1541,6 +1851,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1541 1851
1542 host_config = &evt_struct->iu.mad.host_config; 1852 host_config = &evt_struct->iu.mad.host_config;
1543 1853
1854 /* The transport length field is only 16-bit */
1855 length = min(0xffff, length);
1856
1544 /* Set up a lun reset SRP command */ 1857 /* Set up a lun reset SRP command */
1545 memset(host_config, 0x00, sizeof(*host_config)); 1858 memset(host_config, 0x00, sizeof(*host_config));
1546 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; 1859 host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
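The added clamp protects the 16-bit transport length field from silent modulo-2^16 truncation; a worked example of the difference:

    /*   length = 0x12000 (a 72 KiB buffer)
     *   min(0xffff, length)        -> 0xffff  (explicit saturation)
     *   (u16)length, unclamped     -> 0x2000  (silent wraparound)
     */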
@@ -1840,17 +2153,17 @@ static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
1840 smp_rmb(); 2153 smp_rmb();
1841 hostdata->reset_crq = 0; 2154 hostdata->reset_crq = 0;
1842 2155
1843 rc = ibmvscsi_ops->reset_crq_queue(&hostdata->queue, hostdata); 2156 rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
1844 if (!rc) 2157 if (!rc)
1845 rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0); 2158 rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
1846 vio_enable_interrupts(to_vio_dev(hostdata->dev)); 2159 vio_enable_interrupts(to_vio_dev(hostdata->dev));
1847 } else if (hostdata->reenable_crq) { 2160 } else if (hostdata->reenable_crq) {
1848 smp_rmb(); 2161 smp_rmb();
1849 action = "enable"; 2162 action = "enable";
1850 rc = ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata); 2163 rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
1851 hostdata->reenable_crq = 0; 2164 hostdata->reenable_crq = 0;
1852 if (!rc) 2165 if (!rc)
1853 rc = ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0); 2166 rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
1854 } else 2167 } else
1855 return; 2168 return;
1856 2169
@@ -1944,7 +2257,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1944 goto init_crq_failed; 2257 goto init_crq_failed;
1945 } 2258 }
1946 2259
1947 rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); 2260 rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
1948 if (rc != 0 && rc != H_RESOURCE) { 2261 if (rc != 0 && rc != H_RESOURCE) {
1949 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); 2262 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
1950 goto kill_kthread; 2263 goto kill_kthread;
@@ -1974,7 +2287,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1974 * to fail if the other end is not active. In that case we don't 2287
1975 * want to scan 2288 * want to scan
1976 */ 2289 */
1977 if (ibmvscsi_ops->send_crq(hostdata, 0xC001000000000000LL, 0) == 0 2290 if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
1978 || rc == H_RESOURCE) { 2291 || rc == H_RESOURCE) {
1979 /* 2292 /*
1980 * Wait around max init_timeout secs for the adapter to finish 2293 * Wait around max init_timeout secs for the adapter to finish
@@ -2002,7 +2315,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2002 add_host_failed: 2315 add_host_failed:
2003 release_event_pool(&hostdata->pool, hostdata); 2316 release_event_pool(&hostdata->pool, hostdata);
2004 init_pool_failed: 2317 init_pool_failed:
2005 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); 2318 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
2006 kill_kthread: 2319 kill_kthread:
2007 kthread_stop(hostdata->work_thread); 2320 kthread_stop(hostdata->work_thread);
2008 init_crq_failed: 2321 init_crq_failed:
@@ -2018,7 +2331,7 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
2018 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); 2331 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2019 unmap_persist_bufs(hostdata); 2332 unmap_persist_bufs(hostdata);
2020 release_event_pool(&hostdata->pool, hostdata); 2333 release_event_pool(&hostdata->pool, hostdata);
2021 ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, 2334 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
2022 max_events); 2335 max_events);
2023 2336
2024 kthread_stop(hostdata->work_thread); 2337 kthread_stop(hostdata->work_thread);
@@ -2039,7 +2352,10 @@ static int ibmvscsi_remove(struct vio_dev *vdev)
2039static int ibmvscsi_resume(struct device *dev) 2352static int ibmvscsi_resume(struct device *dev)
2040{ 2353{
2041 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev); 2354 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
2042 return ibmvscsi_ops->resume(hostdata); 2355 vio_disable_interrupts(to_vio_dev(hostdata->dev));
2356 tasklet_schedule(&hostdata->srp_task);
2357
2358 return 0;
2043} 2359}
2044 2360
2045/** 2361/**
@@ -2076,9 +2392,7 @@ int __init ibmvscsi_module_init(void)
2076 driver_template.can_queue = max_requests; 2392 driver_template.can_queue = max_requests;
2077 max_events = max_requests + 2; 2393 max_events = max_requests + 2;
2078 2394
2079 if (firmware_has_feature(FW_FEATURE_VIO)) 2395 if (!firmware_has_feature(FW_FEATURE_VIO))
2080 ibmvscsi_ops = &rpavscsi_ops;
2081 else
2082 return -ENODEV; 2396 return -ENODEV;
2083 2397
2084 ibmvscsi_transport_template = 2398 ibmvscsi_transport_template =
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index c503e177601..7d64867c5dd 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -107,26 +107,4 @@ struct ibmvscsi_host_data {
107 dma_addr_t adapter_info_addr; 107 dma_addr_t adapter_info_addr;
108}; 108};
109 109
110/* routines for managing a command/response queue */
111void ibmvscsi_handle_crq(struct viosrp_crq *crq,
112 struct ibmvscsi_host_data *hostdata);
113
114struct ibmvscsi_ops {
115 int (*init_crq_queue)(struct crq_queue *queue,
116 struct ibmvscsi_host_data *hostdata,
117 int max_requests);
118 void (*release_crq_queue)(struct crq_queue *queue,
119 struct ibmvscsi_host_data *hostdata,
120 int max_requests);
121 int (*reset_crq_queue)(struct crq_queue *queue,
122 struct ibmvscsi_host_data *hostdata);
123 int (*reenable_crq_queue)(struct crq_queue *queue,
124 struct ibmvscsi_host_data *hostdata);
125 int (*send_crq)(struct ibmvscsi_host_data *hostdata,
126 u64 word1, u64 word2);
127 int (*resume) (struct ibmvscsi_host_data *hostdata);
128};
129
130extern struct ibmvscsi_ops rpavscsi_ops;
131
132#endif /* IBMVSCSI_H */ 110#endif /* IBMVSCSI_H */
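With the iSeries backend long gone, the ibmvscsi_ops table had exactly one implementation left, so the commit deletes the indirection and calls the RPA routines (now merged into ibmvscsi.c) directly. The dispatch change in miniature:

    /* before: backend chosen at module init through a function table */
    rc = ibmvscsi_ops->send_crq(hostdata, word1, word2);

    /* after: a single static implementation, called directly */
    rc = ibmvscsi_send_crq(hostdata, word1, word2);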
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
deleted file mode 100644
index f48ae0190d9..00000000000
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ /dev/null
@@ -1,368 +0,0 @@
1/* ------------------------------------------------------------
2 * rpa_vscsi.c
3 * (C) Copyright IBM Corporation 1994, 2003
4 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
5 * Santiago Leon (santil@us.ibm.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 * USA
21 *
22 * ------------------------------------------------------------
23 * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
24 *
25 * This driver allows the Linux SCSI peripheral drivers to directly
26 * access devices in the hosting partition, either on an iSeries
27 * hypervisor system or a converged hypervisor system.
28 */
29
30#include <asm/vio.h>
31#include <asm/prom.h>
32#include <asm/iommu.h>
33#include <asm/hvcall.h>
34#include <linux/delay.h>
35#include <linux/dma-mapping.h>
36#include <linux/gfp.h>
37#include <linux/interrupt.h>
38#include "ibmvscsi.h"
39
40static char partition_name[97] = "UNKNOWN";
41static unsigned int partition_number = -1;
42
43/* ------------------------------------------------------------
44 * Routines for managing the command/response queue
45 */
46/**
47 * rpavscsi_handle_event: - Interrupt handler for crq events
48 * @irq: number of irq to handle, not used
49 * @dev_instance: ibmvscsi_host_data of host that received interrupt
50 *
51 * Disables interrupts and schedules srp_task
52 * Always returns IRQ_HANDLED
53 */
54static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
55{
56 struct ibmvscsi_host_data *hostdata =
57 (struct ibmvscsi_host_data *)dev_instance;
58 vio_disable_interrupts(to_vio_dev(hostdata->dev));
59 tasklet_schedule(&hostdata->srp_task);
60 return IRQ_HANDLED;
61}
62
63/**
64 * release_crq_queue: - Deallocates data and unregisters CRQ
65 * @queue: crq_queue to initialize and register
66 * @host_data: ibmvscsi_host_data of host
67 *
68 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
69 * the crq with the hypervisor.
70 */
71static void rpavscsi_release_crq_queue(struct crq_queue *queue,
72 struct ibmvscsi_host_data *hostdata,
73 int max_requests)
74{
75 long rc = 0;
76 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
77 free_irq(vdev->irq, (void *)hostdata);
78 tasklet_kill(&hostdata->srp_task);
79 do {
80 if (rc)
81 msleep(100);
82 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
83 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
84 dma_unmap_single(hostdata->dev,
85 queue->msg_token,
86 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
87 free_page((unsigned long)queue->msgs);
88}
89
90/**
91 * crq_queue_next_crq: - Returns the next entry in message queue
92 * @queue: crq_queue to use
93 *
94 * Returns pointer to next entry in queue, or NULL if there are no new
95 * entries in the CRQ.
96 */
97static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
98{
99 struct viosrp_crq *crq;
100 unsigned long flags;
101
102 spin_lock_irqsave(&queue->lock, flags);
103 crq = &queue->msgs[queue->cur];
104 if (crq->valid & 0x80) {
105 if (++queue->cur == queue->size)
106 queue->cur = 0;
107 } else
108 crq = NULL;
109 spin_unlock_irqrestore(&queue->lock, flags);
110
111 return crq;
112}
113
114/**
115 * rpavscsi_send_crq: - Send a CRQ
116 * @hostdata: the adapter
117 * @word1: the first 64 bits of the data
118 * @word2: the second 64 bits of the data
119 */
120static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
121 u64 word1, u64 word2)
122{
123 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
124
125 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
126}
127
128/**
129 * rpavscsi_task: - Process srps asynchronously
130 * @data: ibmvscsi_host_data of host
131 */
132static void rpavscsi_task(void *data)
133{
134 struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
135 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
136 struct viosrp_crq *crq;
137 int done = 0;
138
139 while (!done) {
140 /* Pull all the valid messages off the CRQ */
141 while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
142 ibmvscsi_handle_crq(crq, hostdata);
143 crq->valid = 0x00;
144 }
145
146 vio_enable_interrupts(vdev);
147 if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
148 vio_disable_interrupts(vdev);
149 ibmvscsi_handle_crq(crq, hostdata);
150 crq->valid = 0x00;
151 } else {
152 done = 1;
153 }
154 }
155}
156
157static void gather_partition_info(void)
158{
159 struct device_node *rootdn;
160
161 const char *ppartition_name;
162 const unsigned int *p_number_ptr;
163
164 /* Retrieve information about this partition */
165 rootdn = of_find_node_by_path("/");
166 if (!rootdn) {
167 return;
168 }
169
170 ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
171 if (ppartition_name)
172 strncpy(partition_name, ppartition_name,
173 sizeof(partition_name));
174 p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
175 if (p_number_ptr)
176 partition_number = *p_number_ptr;
177 of_node_put(rootdn);
178}
179
180static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
181{
182 memset(&hostdata->madapter_info, 0x00,
183 sizeof(hostdata->madapter_info));
184
185 dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
186 strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
187
188 strncpy(hostdata->madapter_info.partition_name, partition_name,
189 sizeof(hostdata->madapter_info.partition_name));
190
191 hostdata->madapter_info.partition_number = partition_number;
192
193 hostdata->madapter_info.mad_version = 1;
194 hostdata->madapter_info.os_type = 2;
195}
196
197/**
198 * reset_crq_queue: - resets a crq after a failure
199 * @queue: crq_queue to initialize and register
200 * @hostdata: ibmvscsi_host_data of host
201 *
202 */
203static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
204 struct ibmvscsi_host_data *hostdata)
205{
206 int rc = 0;
207 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
208
209 /* Close the CRQ */
210 do {
211 if (rc)
212 msleep(100);
213 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
214 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
215
216 /* Clean out the queue */
217 memset(queue->msgs, 0x00, PAGE_SIZE);
218 queue->cur = 0;
219
220 set_adapter_info(hostdata);
221
222 /* And re-open it again */
223 rc = plpar_hcall_norets(H_REG_CRQ,
224 vdev->unit_address,
225 queue->msg_token, PAGE_SIZE);
226 if (rc == 2) {
227 /* Adapter is good, but other end is not ready */
228 dev_warn(hostdata->dev, "Partner adapter not ready\n");
229 } else if (rc != 0) {
230 dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
231 }
232 return rc;
233}
234
235/**
236 * initialize_crq_queue: - Initializes and registers CRQ with hypervisor
237 * @queue: crq_queue to initialize and register
238 * @hostdata: ibmvscsi_host_data of host
239 *
240 * Allocates a page for messages, maps it for dma, and registers
241 * the crq with the hypervisor.
242 * Returns zero on success.
243 */
244static int rpavscsi_init_crq_queue(struct crq_queue *queue,
245 struct ibmvscsi_host_data *hostdata,
246 int max_requests)
247{
248 int rc;
249 int retrc;
250 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
251
252 queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
253
254 if (!queue->msgs)
255 goto malloc_failed;
256 queue->size = PAGE_SIZE / sizeof(*queue->msgs);
257
258 queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
259 queue->size * sizeof(*queue->msgs),
260 DMA_BIDIRECTIONAL);
261
262 if (dma_mapping_error(hostdata->dev, queue->msg_token))
263 goto map_failed;
264
265 gather_partition_info();
266 set_adapter_info(hostdata);
267
268 retrc = rc = plpar_hcall_norets(H_REG_CRQ,
269 vdev->unit_address,
270 queue->msg_token, PAGE_SIZE);
271 if (rc == H_RESOURCE)
272 /* maybe kexecing and resource is busy. try a reset */
273 rc = rpavscsi_reset_crq_queue(queue,
274 hostdata);
275
276 if (rc == 2) {
277 /* Adapter is good, but other end is not ready */
278 dev_warn(hostdata->dev, "Partner adapter not ready\n");
279 retrc = 0;
280 } else if (rc != 0) {
281 dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
282 goto reg_crq_failed;
283 }
284
285 queue->cur = 0;
286 spin_lock_init(&queue->lock);
287
288 tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
289 (unsigned long)hostdata);
290
291 if (request_irq(vdev->irq,
292 rpavscsi_handle_event,
293 0, "ibmvscsi", (void *)hostdata) != 0) {
294 dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
295 vdev->irq);
296 goto req_irq_failed;
297 }
298
299 rc = vio_enable_interrupts(vdev);
300 if (rc != 0) {
301 dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
302 goto req_irq_failed;
303 }
304
305 return retrc;
306
307 req_irq_failed:
308 tasklet_kill(&hostdata->srp_task);
309 rc = 0;
310 do {
311 if (rc)
312 msleep(100);
313 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
314 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
315 reg_crq_failed:
316 dma_unmap_single(hostdata->dev,
317 queue->msg_token,
318 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
319 map_failed:
320 free_page((unsigned long)queue->msgs);
321 malloc_failed:
322 return -1;
323}
324
325/**
326 * reenable_crq_queue: - reenables a crq after
327 * @queue: crq_queue to initialize and register
328 * @hostdata: ibmvscsi_host_data of host
329 *
330 */
331static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
332 struct ibmvscsi_host_data *hostdata)
333{
334 int rc = 0;
335 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
336
337 /* Re-enable the CRQ */
338 do {
339 if (rc)
340 msleep(100);
341 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
342 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
343
344 if (rc)
345 dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
346 return rc;
347}
348
349/**
350 * rpavscsi_resume: - resume after suspend
351 * @hostdata: ibmvscsi_host_data of host
352 *
353 */
354static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
355{
356 vio_disable_interrupts(to_vio_dev(hostdata->dev));
357 tasklet_schedule(&hostdata->srp_task);
358 return 0;
359}
360
361struct ibmvscsi_ops rpavscsi_ops = {
362 .init_crq_queue = rpavscsi_init_crq_queue,
363 .release_crq_queue = rpavscsi_release_crq_queue,
364 .reset_crq_queue = rpavscsi_reset_crq_queue,
365 .reenable_crq_queue = rpavscsi_reenable_crq_queue,
366 .send_crq = rpavscsi_send_crq,
367 .resume = rpavscsi_resume,
368};
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 45e192a5100..e3f29f61cbc 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -566,6 +566,23 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
566#endif 566#endif
567 567
568/** 568/**
569 * ipr_lock_and_done - Acquire lock and complete command
570 * @ipr_cmd: ipr command struct
571 *
572 * Return value:
573 * none
574 **/
575static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
576{
577 unsigned long lock_flags;
578 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
579
580 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
581 ipr_cmd->done(ipr_cmd);
582 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
583}
584
585/**
569 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse 586 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
570 * @ipr_cmd: ipr command struct 587 * @ipr_cmd: ipr command struct
571 * 588 *
@@ -611,34 +628,50 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
611 * Return value: 628 * Return value:
612 * none 629 * none
613 **/ 630 **/
614static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 631static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
632 void (*fast_done) (struct ipr_cmnd *))
615{ 633{
616 ipr_reinit_ipr_cmnd(ipr_cmd); 634 ipr_reinit_ipr_cmnd(ipr_cmd);
617 ipr_cmd->u.scratch = 0; 635 ipr_cmd->u.scratch = 0;
618 ipr_cmd->sibling = NULL; 636 ipr_cmd->sibling = NULL;
637 ipr_cmd->fast_done = fast_done;
619 init_timer(&ipr_cmd->timer); 638 init_timer(&ipr_cmd->timer);
620} 639}
621 640
622/** 641/**
623 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block 642 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
624 * @ioa_cfg: ioa config struct 643 * @ioa_cfg: ioa config struct
625 * 644 *
626 * Return value: 645 * Return value:
627 * pointer to ipr command struct 646 * pointer to ipr command struct
628 **/ 647 **/
629static 648static
630struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) 649struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
631{ 650{
632 struct ipr_cmnd *ipr_cmd; 651 struct ipr_cmnd *ipr_cmd;
633 652
634 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue); 653 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
635 list_del(&ipr_cmd->queue); 654 list_del(&ipr_cmd->queue);
636 ipr_init_ipr_cmnd(ipr_cmd);
637 655
638 return ipr_cmd; 656 return ipr_cmd;
639} 657}
640 658
641/** 659/**
660 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
661 * @ioa_cfg: ioa config struct
662 *
663 * Return value:
664 * pointer to ipr command struct
665 **/
666static
667struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
668{
669 struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
670 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
671 return ipr_cmd;
672}
673
674/**
642 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts 675 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
643 * @ioa_cfg: ioa config struct 676 * @ioa_cfg: ioa config struct
644 * @clr_ints: interrupts to clear 677 * @clr_ints: interrupts to clear
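Splitting allocation from initialization lets the queuecommand path take a command block off free_q while holding host_lock, then run the initialization (and pick a per-path completion callback) after dropping it. A sketch of the calling pattern introduced here:

    spin_lock_irqsave(shost->host_lock, flags);
    ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);  /* list_del needs the lock */
    spin_unlock_irqrestore(shost->host_lock, flags);

    /* lock no longer held: fast path completes via ipr_scsi_done */
    ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);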
@@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5116 u16 cmd_index; 5149 u16 cmd_index;
5117 int num_hrrq = 0; 5150 int num_hrrq = 0;
5118 int irq_none = 0; 5151 int irq_none = 0;
5119 struct ipr_cmnd *ipr_cmd; 5152 struct ipr_cmnd *ipr_cmd, *temp;
5120 irqreturn_t rc = IRQ_NONE; 5153 irqreturn_t rc = IRQ_NONE;
5154 LIST_HEAD(doneq);
5121 5155
5122 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 5156 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5123 5157
@@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5138 5172
5139 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) { 5173 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5140 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA"); 5174 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5175 rc = IRQ_HANDLED;
5142 return IRQ_HANDLED; 5176 goto unlock_out;
5143 } 5177 }
5144 5178
5145 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 5179 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5148 5182
5149 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 5183 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5150 5184
5151 list_del(&ipr_cmd->queue); 5185 list_move_tail(&ipr_cmd->queue, &doneq);
5152 del_timer(&ipr_cmd->timer);
5153 ipr_cmd->done(ipr_cmd);
5154 5186
5155 rc = IRQ_HANDLED; 5187 rc = IRQ_HANDLED;
5156 5188
@@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5180 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES && 5212 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5181 int_reg & IPR_PCII_HRRQ_UPDATED) { 5213 int_reg & IPR_PCII_HRRQ_UPDATED) {
5182 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ"); 5214 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5215 rc = IRQ_HANDLED;
5184 return IRQ_HANDLED; 5216 goto unlock_out;
5185 } else 5217 } else
5186 break; 5218 break;
5187 } 5219 }
@@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5189 if (unlikely(rc == IRQ_NONE)) 5221 if (unlikely(rc == IRQ_NONE))
5190 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 5222 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5191 5223
5224unlock_out:
5192 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 5225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5226 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5227 list_del(&ipr_cmd->queue);
5228 del_timer(&ipr_cmd->timer);
5229 ipr_cmd->fast_done(ipr_cmd);
5230 }
5231
5193 return rc; 5232 return rc;
5194} 5233}
5195 5234
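Harvesting finished commands onto a private doneq and running their callbacks only after host_lock is dropped is what allows a completion routine to take the lock itself (as ipr_lock_and_done does) without deadlocking the ISR. The generic shape of the pattern, as applied above:

    LIST_HEAD(doneq);

    spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
    /* ... for each completed command found in the HRRQ ... */
    list_move_tail(&ipr_cmd->queue, &doneq);
    spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

    list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
            list_del(&ipr_cmd->queue);
            del_timer(&ipr_cmd->timer);
            ipr_cmd->fast_done(ipr_cmd);   /* may re-take host_lock */
    }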
@@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5770 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5809 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5771 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5810 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5772 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 5811 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5812 unsigned long lock_flags;
5773 5813
5774 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 5814 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5775 5815
5776 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 5816 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5777 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5817 scsi_dma_unmap(scsi_cmd);
5818
5819 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5778 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5820 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5779 scsi_cmd->scsi_done(scsi_cmd); 5821 scsi_cmd->scsi_done(scsi_cmd);
5780 } else 5822 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5823 } else {
5824 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5781 ipr_erp_start(ioa_cfg, ipr_cmd); 5825 ipr_erp_start(ioa_cfg, ipr_cmd);
5826 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5827 }
5782} 5828}
5783 5829
5784/** 5830/**
5785 * ipr_queuecommand - Queue a mid-layer request 5831 * ipr_queuecommand - Queue a mid-layer request
5832 * @shost: scsi host struct
5786 * @scsi_cmd: scsi command struct 5833 * @scsi_cmd: scsi command struct
5787 * @done: done function
5788 * 5834 *
5789 * This function queues a request generated by the mid-layer. 5835 * This function queues a request generated by the mid-layer.
5790 * 5836 *
@@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5793 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy 5839 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5794 * SCSI_MLQUEUE_HOST_BUSY if host is busy 5840 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5795 **/ 5841 **/
5796static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, 5842static int ipr_queuecommand(struct Scsi_Host *shost,
5797 void (*done) (struct scsi_cmnd *)) 5843 struct scsi_cmnd *scsi_cmd)
5798{ 5844{
5799 struct ipr_ioa_cfg *ioa_cfg; 5845 struct ipr_ioa_cfg *ioa_cfg;
5800 struct ipr_resource_entry *res; 5846 struct ipr_resource_entry *res;
5801 struct ipr_ioarcb *ioarcb; 5847 struct ipr_ioarcb *ioarcb;
5802 struct ipr_cmnd *ipr_cmd; 5848 struct ipr_cmnd *ipr_cmd;
5803 int rc = 0; 5849 unsigned long lock_flags;
5850 int rc;
5804 5851
5805 scsi_cmd->scsi_done = done; 5852 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
5806 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; 5853
5807 res = scsi_cmd->device->hostdata; 5854 spin_lock_irqsave(shost->host_lock, lock_flags);
5808 scsi_cmd->result = (DID_OK << 16); 5855 scsi_cmd->result = (DID_OK << 16);
5856 res = scsi_cmd->device->hostdata;
5809 5857
5810 /* 5858 /*
5811 * We are currently blocking all devices due to a host reset 5859 * We are currently blocking all devices due to a host reset
5812 * We have told the host to stop giving us new requests, but 5860 * We have told the host to stop giving us new requests, but
5813 * ERP ops don't count. FIXME 5861 * ERP ops don't count. FIXME
5814 */ 5862 */
5815 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) 5863 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
5864 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5816 return SCSI_MLQUEUE_HOST_BUSY; 5865 return SCSI_MLQUEUE_HOST_BUSY;
5866 }
5817 5867
5818 /* 5868 /*
5819 * FIXME - Create scsi_set_host_offline interface 5869 * FIXME - Create scsi_set_host_offline interface
5820 * and the ioa_is_dead check can be removed 5870 * and the ioa_is_dead check can be removed
5821 */ 5871 */
5822 if (unlikely(ioa_cfg->ioa_is_dead || !res)) { 5872 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5823 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 5873 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5824 scsi_cmd->result = (DID_NO_CONNECT << 16); 5874 goto err_nodev;
5825 scsi_cmd->scsi_done(scsi_cmd); 5875 }
5826 return 0; 5876
5877 if (ipr_is_gata(res) && res->sata_port) {
5878 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5879 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5880 return rc;
5827 } 5881 }
5828 5882
5829 if (ipr_is_gata(res) && res->sata_port) 5883 ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
5830 return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap); 5884 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5831 5885
5832 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 5886 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
5833 ioarcb = &ipr_cmd->ioarcb; 5887 ioarcb = &ipr_cmd->ioarcb;
5834 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5835 5888
5836 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); 5889 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5837 ipr_cmd->scsi_cmd = scsi_cmd; 5890 ipr_cmd->scsi_cmd = scsi_cmd;
5838 ioarcb->res_handle = res->res_handle; 5891 ipr_cmd->done = ipr_scsi_eh_done;
5839 ipr_cmd->done = ipr_scsi_done;
5840 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5841 5892
5842 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { 5893 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5843 if (scsi_cmd->underflow == 0) 5894 if (scsi_cmd->underflow == 0)
5844 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5895 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5845 5896
5846 if (res->needs_sync_complete) {
5847 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5848 res->needs_sync_complete = 0;
5849 }
5850
5851 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5897 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5852 if (ipr_is_gscsi(res)) 5898 if (ipr_is_gscsi(res))
5853 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; 5899 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5859,23 +5905,46 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5859 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) 5905 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5860 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; 5906 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5861 5907
5862 if (likely(rc == 0)) { 5908 if (ioa_cfg->sis64)
5863 if (ioa_cfg->sis64) 5909 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5864 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); 5910 else
5865 else 5911 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5866 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5867 }
5868 5912
5869 if (unlikely(rc != 0)) { 5913 spin_lock_irqsave(shost->host_lock, lock_flags);
5870 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5914 if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
5915 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5916 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5917 if (!rc)
5918 scsi_dma_unmap(scsi_cmd);
5871 return SCSI_MLQUEUE_HOST_BUSY; 5919 return SCSI_MLQUEUE_HOST_BUSY;
5872 } 5920 }
5873 5921
5922 if (unlikely(ioa_cfg->ioa_is_dead)) {
5923 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5924 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5925 scsi_dma_unmap(scsi_cmd);
5926 goto err_nodev;
5927 }
5928
5929 ioarcb->res_handle = res->res_handle;
5930 if (res->needs_sync_complete) {
5931 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5932 res->needs_sync_complete = 0;
5933 }
5934 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5935 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5874 ipr_send_command(ipr_cmd); 5936 ipr_send_command(ipr_cmd);
5937 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5875 return 0; 5938 return 0;
5876}
5877 5939
5878static DEF_SCSI_QCMD(ipr_queuecommand) 5940err_nodev:
5941 spin_lock_irqsave(shost->host_lock, lock_flags);
5942 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5943 scsi_cmd->result = (DID_NO_CONNECT << 16);
5944 scsi_cmd->scsi_done(scsi_cmd);
5945 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5946 return 0;
5947}
5879 5948
5880/** 5949/**
5881 * ipr_ioctl - IOCTL handler 5950 * ipr_ioctl - IOCTL handler
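Dropping the DEF_SCSI_QCMD() wrapper makes ipr_queuecommand manage host_lock itself, so the expensive scatter/gather build runs unlocked and the adapter state has to be re-validated before the command is committed. Reduced to its essentials (build_mappings() and state_changed() are stand-ins for ipr_build_ioadl* and the allow_cmds/ioa_is_dead checks):

    rc = build_mappings(ipr_cmd);     /* unlocked: DMA-maps the request */

    spin_lock_irqsave(shost->host_lock, lock_flags);
    if (rc || state_changed(ioa_cfg)) {
            list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
            spin_unlock_irqrestore(shost->host_lock, lock_flags);
            /* unwind the mapping, then report busy or no-connect */
            return SCSI_MLQUEUE_HOST_BUSY;
    }
    list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
    ipr_send_command(ipr_cmd);
    spin_unlock_irqrestore(shost->host_lock, lock_flags);
    return 0;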
@@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8775 8844
8776 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; 8845 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8777 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); 8846 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8778 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, 8847 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
8779 sata_port_info.flags, &ipr_sata_ops);
8780 8848
8781 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id); 8849 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8782 8850
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 153b8bd91d1..c8a137f83bb 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
38/* 38/*
39 * Literals 39 * Literals
40 */ 40 */
41#define IPR_DRIVER_VERSION "2.5.3" 41#define IPR_DRIVER_VERSION "2.5.4"
42#define IPR_DRIVER_DATE "(March 10, 2012)" 42#define IPR_DRIVER_DATE "(July 11, 2012)"
43 43
44/* 44/*
45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1525,6 +1525,7 @@ struct ipr_cmnd {
1525 struct ata_queued_cmd *qc; 1525 struct ata_queued_cmd *qc;
1526 struct completion completion; 1526 struct completion completion;
1527 struct timer_list timer; 1527 struct timer_list timer;
1528 void (*fast_done) (struct ipr_cmnd *);
1528 void (*done) (struct ipr_cmnd *); 1529 void (*done) (struct ipr_cmnd *);
1529 int (*job_step) (struct ipr_cmnd *); 1530 int (*job_step) (struct ipr_cmnd *);
1530 int (*job_step_failed) (struct ipr_cmnd *); 1531 int (*job_step_failed) (struct ipr_cmnd *);
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index b334fdc1726..609dafd661d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1044,7 +1044,7 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
1044 return SCI_SUCCESS; 1044 return SCI_SUCCESS;
1045} 1045}
1046 1046
1047void isci_host_scan_start(struct Scsi_Host *shost) 1047void isci_host_start(struct Scsi_Host *shost)
1048{ 1048{
1049 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; 1049 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost); 1050 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
@@ -1079,7 +1079,6 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
1079 1079
1080void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task) 1080void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1081{ 1081{
1082 task->lldd_task = NULL;
1083 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) && 1082 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1084 !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1083 !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1085 if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) { 1084 if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
@@ -1087,16 +1086,19 @@ void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_ta
1087 dev_dbg(&ihost->pdev->dev, 1086 dev_dbg(&ihost->pdev->dev,
1088 "%s: Normal - ireq/task = %p/%p\n", 1087 "%s: Normal - ireq/task = %p/%p\n",
1089 __func__, ireq, task); 1088 __func__, ireq, task);
1090 1089 task->lldd_task = NULL;
1091 task->task_done(task); 1090 task->task_done(task);
1092 } else { 1091 } else {
1093 dev_dbg(&ihost->pdev->dev, 1092 dev_dbg(&ihost->pdev->dev,
1094 "%s: Error - ireq/task = %p/%p\n", 1093 "%s: Error - ireq/task = %p/%p\n",
1095 __func__, ireq, task); 1094 __func__, ireq, task);
1096 1095 if (sas_protocol_ata(task->task_proto))
1096 task->lldd_task = NULL;
1097 sas_task_abort(task); 1097 sas_task_abort(task);
1098 } 1098 }
1099 } 1099 } else
1100 task->lldd_task = NULL;
1101
1100 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) 1102 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1101 wake_up_all(&ihost->eventq); 1103 wake_up_all(&ihost->eventq);
1102 1104
@@ -1120,10 +1122,16 @@ void isci_host_completion_routine(unsigned long data)
1120 sci_controller_completion_handler(ihost); 1122 sci_controller_completion_handler(ihost);
1121 spin_unlock_irq(&ihost->scic_lock); 1123 spin_unlock_irq(&ihost->scic_lock);
1122 1124
1123 /* the coalescence timeout doubles at each encoding step, so 1125 /*
1126 * we subtract SCI_MAX_PORTS to account for the number of dummy TCs
1127 * issued for hardware issue workaround
1128 */
1129 active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1130
1131 /*
1132 * the coalescence timeout doubles at each encoding step, so
1124 * update it based on the ilog2 value of the outstanding requests 1133 * update it based on the ilog2 value of the outstanding requests
1125 */ 1134 */
1126 active = isci_tci_active(ihost);
1127 writel(SMU_ICC_GEN_VAL(NUMBER, active) | 1135 writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1128 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)), 1136 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1129 &ihost->smu_registers->interrupt_coalesce_control); 1137 &ihost->smu_registers->interrupt_coalesce_control);
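Each encoding step of the coalescence timer doubles the timeout, hence the ilog2() of the adjusted count. A worked example, assuming SCI_MAX_PORTS is 4:

    /*   active = isci_tci_active(ihost) - SCI_MAX_PORTS
     *          = 70 - 4 = 66 outstanding real TCs
     *   ilog2(66) = 6          (2^6 = 64 <= 66 < 128)
     *   TIMER field = ISCI_COALESCE_BASE + 6
     */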
@@ -1973,7 +1981,7 @@ static void sci_controller_afe_initialization(struct isci_host *ihost)
1973 } 1981 }
1974 1982
1975 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 1983 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1976 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id]; 1984 struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
1977 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; 1985 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1978 int cable_length_long = 1986 int cable_length_long =
1979 is_long_cable(phy_id, cable_selection_mask); 1987 is_long_cable(phy_id, cable_selection_mask);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 9ab58e0540e..4911310a38f 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -473,7 +473,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
473 473
474enum sci_status sci_controller_continue_io(struct isci_request *ireq); 474enum sci_status sci_controller_continue_io(struct isci_request *ireq);
475int isci_host_scan_finished(struct Scsi_Host *, unsigned long); 475int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
476void isci_host_scan_start(struct Scsi_Host *); 476void isci_host_start(struct Scsi_Host *);
477u16 isci_alloc_tag(struct isci_host *ihost); 477u16 isci_alloc_tag(struct isci_host *ihost);
478enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); 478enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
479void isci_tci_free(struct isci_host *ihost, u16 tci); 479void isci_tci_free(struct isci_host *ihost, u16 tci);
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 9be45a2b223..b74050b95d6 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -156,7 +156,7 @@ static struct scsi_host_template isci_sht = {
156 .target_alloc = sas_target_alloc, 156 .target_alloc = sas_target_alloc,
157 .slave_configure = sas_slave_configure, 157 .slave_configure = sas_slave_configure,
158 .scan_finished = isci_host_scan_finished, 158 .scan_finished = isci_host_scan_finished,
159 .scan_start = isci_host_scan_start, 159 .scan_start = isci_host_start,
160 .change_queue_depth = sas_change_queue_depth, 160 .change_queue_depth = sas_change_queue_depth,
161 .change_queue_type = sas_change_queue_type, 161 .change_queue_type = sas_change_queue_type,
162 .bios_param = sas_bios_param, 162 .bios_param = sas_bios_param,
@@ -644,7 +644,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
644 orom->hdr.version)) { 644 orom->hdr.version)) {
645 dev_warn(&pdev->dev, 645 dev_warn(&pdev->dev,
646 "[%d]: invalid oem parameters detected, falling back to firmware\n", i); 646 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
647 devm_kfree(&pdev->dev, orom);
648 orom = NULL; 647 orom = NULL;
649 break; 648 break;
650 } 649 }
@@ -722,11 +721,67 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
722 } 721 }
723} 722}
724 723
724#ifdef CONFIG_PM
725static int isci_suspend(struct device *dev)
726{
727 struct pci_dev *pdev = to_pci_dev(dev);
728 struct isci_host *ihost;
729 int i;
730
731 for_each_isci_host(i, ihost, pdev) {
732 sas_suspend_ha(&ihost->sas_ha);
733 isci_host_deinit(ihost);
734 }
735
736 pci_save_state(pdev);
737 pci_disable_device(pdev);
738 pci_set_power_state(pdev, PCI_D3hot);
739
740 return 0;
741}
742
743static int isci_resume(struct device *dev)
744{
745 struct pci_dev *pdev = to_pci_dev(dev);
746 struct isci_host *ihost;
747 int rc, i;
748
749 pci_set_power_state(pdev, PCI_D0);
750 pci_restore_state(pdev);
751
752 rc = pcim_enable_device(pdev);
753 if (rc) {
754 dev_err(&pdev->dev,
755 "enabling device failure after resume(%d)\n", rc);
756 return rc;
757 }
758
759 pci_set_master(pdev);
760
761 for_each_isci_host(i, ihost, pdev) {
762 sas_prep_resume_ha(&ihost->sas_ha);
763
764 isci_host_init(ihost);
765 isci_host_start(ihost->sas_ha.core.shost);
766 wait_for_start(ihost);
767
768 sas_resume_ha(&ihost->sas_ha);
769 }
770
771 return 0;
772}
773
774static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
775#endif
776
725static struct pci_driver isci_pci_driver = { 777static struct pci_driver isci_pci_driver = {
726 .name = DRV_NAME, 778 .name = DRV_NAME,
727 .id_table = isci_id_table, 779 .id_table = isci_id_table,
728 .probe = isci_pci_probe, 780 .probe = isci_pci_probe,
729 .remove = __devexit_p(isci_pci_remove), 781 .remove = __devexit_p(isci_pci_remove),
782#ifdef CONFIG_PM
783 .driver.pm = &isci_pm_ops,
784#endif
730}; 785};
731 786
732static __init int isci_init(void) 787static __init int isci_init(void)
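
The isci suspend/resume hunk above follows the stock PCI power-management pattern: dev_pm_ops callbacks bundled by SIMPLE_DEV_PM_OPS and attached through pci_driver.driver.pm under CONFIG_PM. A stripped-down kernel-style sketch of just that wiring; my_suspend()/my_resume() are hypothetical stand-ins and the driver-specific work (HA suspend, host reinit) is omitted.

#include <linux/pci.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int my_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	rc = pcim_enable_device(pdev);	/* managed re-enable */
	if (rc)
		return rc;
	pci_set_master(pdev);
	return 0;
}

/* expands to a struct dev_pm_ops wiring both callbacks */
static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);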
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 18f43d4c30b..cb87b2ef7c9 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -169,7 +169,7 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
169 phy_cap.gen1_no_ssc = 1; 169 phy_cap.gen1_no_ssc = 1;
170 if (ihost->oem_parameters.controller.do_enable_ssc) { 170 if (ihost->oem_parameters.controller.do_enable_ssc) {
171 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; 171 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
172 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx]; 172 struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_idx];
173 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); 173 struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
174 bool en_sas = false; 174 bool en_sas = false;
175 bool en_sata = false; 175 bool en_sata = false;
@@ -1205,6 +1205,7 @@ static void scu_link_layer_start_oob(struct isci_phy *iphy)
1205 /** Reset OOB sequence - start */ 1205 /** Reset OOB sequence - start */
1206 val = readl(&ll->phy_configuration); 1206 val = readl(&ll->phy_configuration);
1207 val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) | 1207 val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
1208 SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE) |
1208 SCU_SAS_PCFG_GEN_BIT(HARD_RESET)); 1209 SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
1209 writel(val, &ll->phy_configuration); 1210 writel(val, &ll->phy_configuration);
1210 readl(&ll->phy_configuration); /* flush */ 1211 readl(&ll->phy_configuration); /* flush */
@@ -1236,6 +1237,7 @@ static void scu_link_layer_tx_hard_reset(
1236 * to the starting state. */ 1237 * to the starting state. */
1237 phy_configuration_value = 1238 phy_configuration_value =
1238 readl(&iphy->link_layer_registers->phy_configuration); 1239 readl(&iphy->link_layer_registers->phy_configuration);
1240 phy_configuration_value &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
1239 phy_configuration_value |= 1241 phy_configuration_value |=
1240 (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) | 1242 (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
1241 SCU_SAS_PCFG_GEN_BIT(OOB_RESET)); 1243 SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
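
Both OOB hunks above apply the usual read-modify-write-then-flush discipline to the phy configuration register, now clearing OOB_ENABLE before asserting the reset bits so OOB restarts from a known state. A kernel-style sketch of the idiom; the SCU_SAS_PCFG_GEN_BIT() names come from the hunk, the helper itself is illustrative and not compilable outside the driver.

#include <linux/io.h>

static void restart_oob(void __iomem *phy_configuration)
{
	u32 val;

	val = readl(phy_configuration);
	val &= ~SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);	/* stop OOB first */
	val |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
	       SCU_SAS_PCFG_GEN_BIT(HARD_RESET);	/* then reset */
	writel(val, phy_configuration);
	readl(phy_configuration);			/* flush the write */
}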
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 4d95654c3fd..8ac646e5edd 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
104 104
105 if (i >= len) { 105 if (i >= len) {
106 dev_err(&pdev->dev, "oprom parse error\n"); 106 dev_err(&pdev->dev, "oprom parse error\n");
107 devm_kfree(&pdev->dev, rom);
108 rom = NULL; 107 rom = NULL;
109 } 108 }
110 pci_unmap_biosrom(oprom); 109 pci_unmap_biosrom(oprom);
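
The dropped devm_kfree() calls here and in init.c lean on devres: anything obtained from a devm_* allocator is released automatically when the device detaches, so an error path only needs to stop using the buffer. A hedged sketch of that convention; parse_oem_rom() is a hypothetical helper, struct isci_orom is the driver's own type.

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>

static struct isci_orom *load_orom(struct pci_dev *pdev)
{
	struct isci_orom *rom;

	rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
	if (!rom)
		return NULL;

	if (parse_oem_rom(pdev, rom)) {
		/* no devm_kfree() needed: devres frees it at detach */
		return NULL;
	}
	return rom;
}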
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index a703b9ce0c2..c7ee81d0112 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -212,7 +212,7 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
212 scics_sds_remote_node_context_callback callback, 212 scics_sds_remote_node_context_callback callback,
213 void *callback_parameter); 213 void *callback_parameter);
214enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, 214enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
215 u32 suspend_type, 215 enum sci_remote_node_suspension_reasons reason,
216 u32 suspension_code); 216 u32 suspension_code);
217enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, 217enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
218 scics_sds_remote_node_context_callback cb_fn, 218 scics_sds_remote_node_context_callback cb_fn,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 922086105b4..1b91ca0dc1e 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -55,7 +55,7 @@ static struct scsi_transport_template *iscsi_sw_tcp_scsi_transport;
55static struct scsi_host_template iscsi_sw_tcp_sht; 55static struct scsi_host_template iscsi_sw_tcp_sht;
56static struct iscsi_transport iscsi_sw_tcp_transport; 56static struct iscsi_transport iscsi_sw_tcp_transport;
57 57
58static unsigned int iscsi_max_lun = 512; 58static unsigned int iscsi_max_lun = ~0;
59module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 59module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
60 60
61static int iscsi_sw_tcp_dbg; 61static int iscsi_sw_tcp_dbg;
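
The new max_lun default of ~0 is simply all bits set, i.e. UINT_MAX for an unsigned module parameter, which removes the former 512-LUN cap. A two-line runnable check:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int max_lun = ~0;	/* all bits set */

	printf("max_lun=%u UINT_MAX=%u equal=%d\n",
	       max_lun, UINT_MAX, max_lun == UINT_MAX);
	return 0;
}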
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index a59fcdc8fd6..bdb81cda840 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -580,10 +580,7 @@ int sas_ata_init(struct domain_device *found_dev)
580 struct ata_port *ap; 580 struct ata_port *ap;
581 int rc; 581 int rc;
582 582
583 ata_host_init(&found_dev->sata_dev.ata_host, 583 ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
584 ha->dev,
585 sata_port_info.flags,
586 &sas_sata_ops);
587 ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host, 584 ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
588 &sata_port_info, 585 &sata_port_info,
589 shost); 586 shost);
@@ -700,6 +697,92 @@ void sas_probe_sata(struct asd_sas_port *port)
700 if (ata_dev_disabled(sas_to_ata_dev(dev))) 697 if (ata_dev_disabled(sas_to_ata_dev(dev)))
701 sas_fail_probe(dev, __func__, -ENODEV); 698 sas_fail_probe(dev, __func__, -ENODEV);
702 } 699 }
700
701}
702
703static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
704{
705 struct domain_device *dev, *n;
706 bool retry = false;
707
708 list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
709 int rc;
710
711 if (!dev_is_sata(dev))
712 continue;
713
714 sas_ata_wait_eh(dev);
715 rc = dev->sata_dev.pm_result;
716 if (rc == -EAGAIN)
717 retry = true;
718 else if (rc) {
719 /* since we don't have a
720 * ->port_{suspend|resume} routine in our
721 * ata_port ops, and no entanglements with
722 * acpi, suspend should just be a mechanical trip
723 * through eh; catch cases where these
724 * assumptions are invalidated
725 */
726 WARN_ONCE(1, "failed %s %s error: %d\n", func,
727 dev_name(&dev->rphy->dev), rc);
728 }
729
730 /* if libata failed to power manage the device, tear it down */
731 if (ata_dev_disabled(sas_to_ata_dev(dev)))
732 sas_fail_probe(dev, func, -ENODEV);
733 }
734
735 return retry;
736}
737
738void sas_suspend_sata(struct asd_sas_port *port)
739{
740 struct domain_device *dev;
741
742 retry:
743 mutex_lock(&port->ha->disco_mutex);
744 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
745 struct sata_device *sata;
746
747 if (!dev_is_sata(dev))
748 continue;
749
750 sata = &dev->sata_dev;
751 if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
752 continue;
753
754 sata->pm_result = -EIO;
755 ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
756 }
757 mutex_unlock(&port->ha->disco_mutex);
758
759 if (sas_ata_flush_pm_eh(port, __func__))
760 goto retry;
761}
762
763void sas_resume_sata(struct asd_sas_port *port)
764{
765 struct domain_device *dev;
766
767 retry:
768 mutex_lock(&port->ha->disco_mutex);
769 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
770 struct sata_device *sata;
771
772 if (!dev_is_sata(dev))
773 continue;
774
775 sata = &dev->sata_dev;
776 if (sata->ap->pm_mesg.event == PM_EVENT_ON)
777 continue;
778
779 sata->pm_result = -EIO;
780 ata_sas_port_async_resume(sata->ap, &sata->pm_result);
781 }
782 mutex_unlock(&port->ha->disco_mutex);
783
784 if (sas_ata_flush_pm_eh(port, __func__))
785 goto retry;
703} 786}
704 787
705/** 788/**
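
sas_suspend_sata() and sas_resume_sata() above share one shape: fan out asynchronous libata power operations under the discovery mutex, flush error handling, and repeat the whole pass while any device reports -EAGAIN. A plain-C skeleton of that control flow; the device array and start_pm()/flush_pm() helpers are stand-ins for the async libata calls.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct pm_dev { int pm_result; };

/* kick off an asynchronous power operation (stub) */
static void start_pm(struct pm_dev *d) { d->pm_result = 0; }
/* wait for the operation and report its result (stub) */
static int flush_pm(struct pm_dev *d) { return d->pm_result; }

static void suspend_all(struct pm_dev *devs, size_t n)
{
	bool retry;

	do {
		retry = false;
		for (size_t i = 0; i < n; i++)
			start_pm(&devs[i]);	/* issue every op first */
		for (size_t i = 0; i < n; i++)
			if (flush_pm(&devs[i]) == -EAGAIN)
				retry = true;	/* redo the whole pass */
	} while (retry);
}

int main(void)
{
	struct pm_dev devs[4] = { { 0 } };

	suspend_all(devs, 4);
	return 0;
}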
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 3e9dc1a8435..a0c3003e0c7 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/async.h>
27#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
28#include <scsi/scsi_eh.h> 29#include <scsi/scsi_eh.h>
29#include "sas_internal.h" 30#include "sas_internal.h"
@@ -180,16 +181,18 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
180 struct Scsi_Host *shost = sas_ha->core.shost; 181 struct Scsi_Host *shost = sas_ha->core.shost;
181 struct sas_internal *i = to_sas_internal(shost->transportt); 182 struct sas_internal *i = to_sas_internal(shost->transportt);
182 183
183 if (i->dft->lldd_dev_found) { 184 if (!i->dft->lldd_dev_found)
184 res = i->dft->lldd_dev_found(dev); 185 return 0;
185 if (res) { 186
186 printk("sas: driver on pcidev %s cannot handle " 187 res = i->dft->lldd_dev_found(dev);
187 "device %llx, error:%d\n", 188 if (res) {
188 dev_name(sas_ha->dev), 189 printk("sas: driver on pcidev %s cannot handle "
189 SAS_ADDR(dev->sas_addr), res); 190 "device %llx, error:%d\n",
190 } 191 dev_name(sas_ha->dev),
191 kref_get(&dev->kref); 192 SAS_ADDR(dev->sas_addr), res);
192 } 193 }
194 set_bit(SAS_DEV_FOUND, &dev->state);
195 kref_get(&dev->kref);
193 return res; 196 return res;
194} 197}
195 198
@@ -200,7 +203,10 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
200 struct Scsi_Host *shost = sas_ha->core.shost; 203 struct Scsi_Host *shost = sas_ha->core.shost;
201 struct sas_internal *i = to_sas_internal(shost->transportt); 204 struct sas_internal *i = to_sas_internal(shost->transportt);
202 205
203 if (i->dft->lldd_dev_gone) { 206 if (!i->dft->lldd_dev_gone)
207 return;
208
209 if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
204 i->dft->lldd_dev_gone(dev); 210 i->dft->lldd_dev_gone(dev);
205 sas_put_device(dev); 211 sas_put_device(dev);
206 } 212 }
@@ -234,6 +240,47 @@ static void sas_probe_devices(struct work_struct *work)
234 } 240 }
235} 241}
236 242
243static void sas_suspend_devices(struct work_struct *work)
244{
245 struct asd_sas_phy *phy;
246 struct domain_device *dev;
247 struct sas_discovery_event *ev = to_sas_discovery_event(work);
248 struct asd_sas_port *port = ev->port;
249 struct Scsi_Host *shost = port->ha->core.shost;
250 struct sas_internal *si = to_sas_internal(shost->transportt);
251
252 clear_bit(DISCE_SUSPEND, &port->disc.pending);
253
254 sas_suspend_sata(port);
255
256 /* lldd is free to forget the domain_device across the
257 * suspension, we force the issue here to keep the reference
258 * counts aligned
259 */
260 list_for_each_entry(dev, &port->dev_list, dev_list_node)
261 sas_notify_lldd_dev_gone(dev);
262
263 /* we are suspending, so we know events are disabled and
264 * phy_list is not being mutated
265 */
266 list_for_each_entry(phy, &port->phy_list, port_phy_el) {
267 if (si->dft->lldd_port_formed)
268 si->dft->lldd_port_deformed(phy);
269 phy->suspended = 1;
270 port->suspended = 1;
271 }
272}
273
274static void sas_resume_devices(struct work_struct *work)
275{
276 struct sas_discovery_event *ev = to_sas_discovery_event(work);
277 struct asd_sas_port *port = ev->port;
278
279 clear_bit(DISCE_RESUME, &port->disc.pending);
280
281 sas_resume_sata(port);
282}
283
237/** 284/**
238 * sas_discover_end_dev -- discover an end device (SSP, etc) 285 * sas_discover_end_dev -- discover an end device (SSP, etc)
239 * @end: pointer to domain device of interest 286 * @end: pointer to domain device of interest
@@ -530,6 +577,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
530 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, 577 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
531 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, 578 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
532 [DISCE_PROBE] = sas_probe_devices, 579 [DISCE_PROBE] = sas_probe_devices,
580 [DISCE_SUSPEND] = sas_suspend_devices,
581 [DISCE_RESUME] = sas_resume_devices,
533 [DISCE_DESTRUCT] = sas_destruct_devices, 582 [DISCE_DESTRUCT] = sas_destruct_devices,
534 }; 583 };
535 584
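
The new DISCE_SUSPEND/DISCE_RESUME handlers slot into sas_init_disc()'s work_func_t table without touching the dispatcher, because discovery events are dispatched purely by enum index. A runnable miniature of that table-driven pattern, with invented event names:

#include <stdio.h>

enum disc_event { EV_PROBE, EV_SUSPEND, EV_RESUME, EV_NUM };

typedef void (*handler_t)(void);

static void on_probe(void)   { puts("probe"); }
static void on_suspend(void) { puts("suspend"); }
static void on_resume(void)  { puts("resume"); }

/* designated initializers keep the table keyed by the enum */
static const handler_t handlers[EV_NUM] = {
	[EV_PROBE]   = on_probe,
	[EV_SUSPEND] = on_suspend,
	[EV_RESUME]  = on_resume,
};

int main(void)
{
	/* queueing an event is just indexing the table */
	handlers[EV_SUSPEND]();
	handlers[EV_RESUME]();
	return 0;
}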
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index fc460933575..cd6f99c1ae7 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -41,6 +41,7 @@ static const char *sas_phye_str[] = {
41 [1] = "PHYE_OOB_DONE", 41 [1] = "PHYE_OOB_DONE",
42 [2] = "PHYE_OOB_ERROR", 42 [2] = "PHYE_OOB_ERROR",
43 [3] = "PHYE_SPINUP_HOLD", 43 [3] = "PHYE_SPINUP_HOLD",
44 [4] = "PHYE_RESUME_TIMEOUT",
44}; 45};
45 46
46void sas_dprint_porte(int phyid, enum port_event pe) 47void sas_dprint_porte(int phyid, enum port_event pe)
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 789c4d8bb7a..aadbd5314c5 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -134,7 +134,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
134 &phy->port_events[event].work, ha); 134 &phy->port_events[event].work, ha);
135} 135}
136 136
137static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) 137void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
138{ 138{
139 struct sas_ha_struct *ha = phy->ha; 139 struct sas_ha_struct *ha = phy->ha;
140 140
@@ -159,7 +159,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
159 159
160 sas_ha->notify_ha_event = notify_ha_event; 160 sas_ha->notify_ha_event = notify_ha_event;
161 sas_ha->notify_port_event = notify_port_event; 161 sas_ha->notify_port_event = notify_port_event;
162 sas_ha->notify_phy_event = notify_phy_event; 162 sas_ha->notify_phy_event = sas_notify_phy_event;
163 163
164 return 0; 164 return 0;
165} 165}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 014297c0588..dbc8a793fd8 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -178,7 +178,7 @@ Undo_phys:
178 return error; 178 return error;
179} 179}
180 180
181int sas_unregister_ha(struct sas_ha_struct *sas_ha) 181static void sas_disable_events(struct sas_ha_struct *sas_ha)
182{ 182{
183 /* Set the state to unregistered to avoid further unchained 183 /* Set the state to unregistered to avoid further unchained
184 * events to be queued, and flush any in-progress drainers 184 * events to be queued, and flush any in-progress drainers
@@ -189,7 +189,11 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
189 spin_unlock_irq(&sas_ha->lock); 189 spin_unlock_irq(&sas_ha->lock);
190 __sas_drain_work(sas_ha); 190 __sas_drain_work(sas_ha);
191 mutex_unlock(&sas_ha->drain_mutex); 191 mutex_unlock(&sas_ha->drain_mutex);
192}
192 193
194int sas_unregister_ha(struct sas_ha_struct *sas_ha)
195{
196 sas_disable_events(sas_ha);
193 sas_unregister_ports(sas_ha); 197 sas_unregister_ports(sas_ha);
194 198
195 /* flush unregistration work */ 199 /* flush unregistration work */
@@ -381,6 +385,90 @@ int sas_set_phy_speed(struct sas_phy *phy,
381 return ret; 385 return ret;
382} 386}
383 387
388void sas_prep_resume_ha(struct sas_ha_struct *ha)
389{
390 int i;
391
392 set_bit(SAS_HA_REGISTERED, &ha->state);
393
394 /* clear out any stale link events/data from the suspension path */
395 for (i = 0; i < ha->num_phys; i++) {
396 struct asd_sas_phy *phy = ha->sas_phy[i];
397
398 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
399 phy->port_events_pending = 0;
400 phy->phy_events_pending = 0;
401 phy->frame_rcvd_size = 0;
402 }
403}
404EXPORT_SYMBOL(sas_prep_resume_ha);
405
406static int phys_suspended(struct sas_ha_struct *ha)
407{
408 int i, rc = 0;
409
410 for (i = 0; i < ha->num_phys; i++) {
411 struct asd_sas_phy *phy = ha->sas_phy[i];
412
413 if (phy->suspended)
414 rc++;
415 }
416
417 return rc;
418}
419
420void sas_resume_ha(struct sas_ha_struct *ha)
421{
422 const unsigned long tmo = msecs_to_jiffies(25000);
423 int i;
424
425 /* deform ports on phys that did not resume
426 * at this point we may be racing the phy coming back (as posted
427 * by the lldd). So we post the event and once we are in the
428 * libsas context check that the phy remains suspended before
429 * tearing it down.
430 */
431 i = phys_suspended(ha);
432 if (i)
433 dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
434 i, i > 1 ? "s" : "");
435 wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
436 for (i = 0; i < ha->num_phys; i++) {
437 struct asd_sas_phy *phy = ha->sas_phy[i];
438
439 if (phy->suspended) {
440 dev_warn(&phy->phy->dev, "resume timeout\n");
441 sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
442 }
443 }
444
445 /* all phys are back up or timed out, turn on i/o so we can
446 * flush out disks that did not return
447 */
448 scsi_unblock_requests(ha->core.shost);
449 sas_drain_work(ha);
450}
451EXPORT_SYMBOL(sas_resume_ha);
452
453void sas_suspend_ha(struct sas_ha_struct *ha)
454{
455 int i;
456
457 sas_disable_events(ha);
458 scsi_block_requests(ha->core.shost);
459 for (i = 0; i < ha->num_phys; i++) {
460 struct asd_sas_port *port = ha->sas_port[i];
461
462 sas_discover_event(port, DISCE_SUSPEND);
463 }
464
465 /* flush suspend events while unregistered */
466 mutex_lock(&ha->drain_mutex);
467 __sas_drain_work(ha);
468 mutex_unlock(&ha->drain_mutex);
469}
470EXPORT_SYMBOL(sas_suspend_ha);
471
384static void sas_phy_release(struct sas_phy *phy) 472static void sas_phy_release(struct sas_phy *phy)
385{ 473{
386 kfree(phy->hostdata); 474 kfree(phy->hostdata);
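
sas_resume_ha() above bounds the wait for returning phys with wait_event_timeout(), which returns 0 when the timeout lapses with the condition still false, and then sweeps up any phy that stayed suspended. A kernel-style sketch of that bounded wait; all_phys_resumed() is a hypothetical predicate standing in for phys_suspended() == 0.

#include <linux/jiffies.h>
#include <linux/wait.h>

static void resume_and_reap(struct sas_ha_struct *ha)
{
	const unsigned long tmo = msecs_to_jiffies(25000);

	/* returns 0 only if the timeout elapsed with the condition false */
	if (!wait_event_timeout(ha->eh_wait_q, all_phys_resumed(ha), tmo))
		pr_warn("some phys did not resume in time\n");
}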
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 507e4cf12e5..1de67964e5a 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -89,6 +89,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
89 enum phy_func phy_func, struct sas_phy_linkrates *); 89 enum phy_func phy_func, struct sas_phy_linkrates *);
90int sas_smp_get_phy_events(struct sas_phy *phy); 90int sas_smp_get_phy_events(struct sas_phy *phy);
91 91
92void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
92void sas_device_set_phy(struct domain_device *dev, struct sas_port *port); 93void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
93struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); 94struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
94struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id); 95struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 521422e857a..cdee446c29e 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -94,6 +94,25 @@ static void sas_phye_spinup_hold(struct work_struct *work)
94 i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL); 94 i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
95} 95}
96 96
97static void sas_phye_resume_timeout(struct work_struct *work)
98{
99 struct asd_sas_event *ev = to_asd_sas_event(work);
100 struct asd_sas_phy *phy = ev->phy;
101
102 clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
103
104 /* phew, lldd got the phy back in the nick of time */
105 if (!phy->suspended) {
106 dev_info(&phy->phy->dev, "resume timeout cancelled\n");
107 return;
108 }
109
110 phy->error = 0;
111 phy->suspended = 0;
112 sas_deform_port(phy, 1);
113}
114
115
97/* ---------- Phy class registration ---------- */ 116/* ---------- Phy class registration ---------- */
98 117
99int sas_register_phys(struct sas_ha_struct *sas_ha) 118int sas_register_phys(struct sas_ha_struct *sas_ha)
@@ -105,6 +124,8 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
105 [PHYE_OOB_DONE] = sas_phye_oob_done, 124 [PHYE_OOB_DONE] = sas_phye_oob_done,
106 [PHYE_OOB_ERROR] = sas_phye_oob_error, 125 [PHYE_OOB_ERROR] = sas_phye_oob_error,
107 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, 126 [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
127 [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
128
108 }; 129 };
109 130
110 static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { 131 static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e884a8c58a0..1398b714c01 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -39,6 +39,49 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy
39 return true; 39 return true;
40} 40}
41 41
42static void sas_resume_port(struct asd_sas_phy *phy)
43{
44 struct domain_device *dev;
45 struct asd_sas_port *port = phy->port;
46 struct sas_ha_struct *sas_ha = phy->ha;
47 struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
48
49 if (si->dft->lldd_port_formed)
50 si->dft->lldd_port_formed(phy);
51
52 if (port->suspended)
53 port->suspended = 0;
54 else {
55 /* we only need to handle "link returned" actions once */
56 return;
57 }
58
59 /* if the port came back:
60 * 1/ presume every device came back
61 * 2/ force the next revalidation to check all expander phys
62 */
63 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
64 int i, rc;
65
66 rc = sas_notify_lldd_dev_found(dev);
67 if (rc) {
68 sas_unregister_dev(port, dev);
69 continue;
70 }
71
72 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
73 dev->ex_dev.ex_change_count = -1;
74 for (i = 0; i < dev->ex_dev.num_phys; i++) {
75 struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
76
77 phy->phy_change_count = -1;
78 }
79 }
80 }
81
82 sas_discover_event(port, DISCE_RESUME);
83}
84
42/** 85/**
43 * sas_form_port -- add this phy to a port 86 * sas_form_port -- add this phy to a port
44 * @phy: the phy of interest 87 * @phy: the phy of interest
@@ -58,7 +101,14 @@ static void sas_form_port(struct asd_sas_phy *phy)
58 if (port) { 101 if (port) {
59 if (!phy_is_wideport_member(port, phy)) 102 if (!phy_is_wideport_member(port, phy))
60 sas_deform_port(phy, 0); 103 sas_deform_port(phy, 0);
61 else { 104 else if (phy->suspended) {
105 phy->suspended = 0;
106 sas_resume_port(phy);
107
108 /* phy came back, try to cancel the timeout */
109 wake_up(&sas_ha->eh_wait_q);
110 return;
111 } else {
62 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 112 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
63 __func__, phy->id, phy->port->id, 113 __func__, phy->id, phy->port->id,
64 phy->port->num_phys); 114 phy->port->num_phys);
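
sas_resume_port() forces the next revalidation by writing -1 into the cached expander and phy change counts, a value the hardware counter can never report, so the comparison is guaranteed to mismatch and every expander phy gets rescanned. A runnable miniature of that cached-generation trick:

#include <stdio.h>

int main(void)
{
	int cached = 7, current_cnt = 7;

	printf("stale? %d\n", cached != current_cnt);	/* 0: skip rescan */
	cached = -1;					/* force mismatch */
	printf("stale? %d\n", cached != current_cnt);	/* 1: rescan */
	return 0;
}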
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a65c05a8d48..a184c2443a6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -73,6 +73,8 @@ struct lpfc_sli2_slim;
73#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */ 73#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
74#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */ 74#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
75 75
76#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */
77
76/* Error Attention event polling interval */ 78/* Error Attention event polling interval */
77#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */ 79#define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */
78 80
@@ -684,6 +686,7 @@ struct lpfc_hba {
684#define LPFC_FCF_FOV 1 /* Fast fcf failover */ 686#define LPFC_FCF_FOV 1 /* Fast fcf failover */
685#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */ 687#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
686 uint32_t cfg_fcf_failover_policy; 688 uint32_t cfg_fcf_failover_policy;
689 uint32_t cfg_fcp_io_sched;
687 uint32_t cfg_cr_delay; 690 uint32_t cfg_cr_delay;
688 uint32_t cfg_cr_count; 691 uint32_t cfg_cr_count;
689 uint32_t cfg_multi_ring_support; 692 uint32_t cfg_multi_ring_support;
@@ -695,6 +698,7 @@ struct lpfc_hba {
695 uint32_t cfg_fcp_imax; 698 uint32_t cfg_fcp_imax;
696 uint32_t cfg_fcp_wq_count; 699 uint32_t cfg_fcp_wq_count;
697 uint32_t cfg_fcp_eq_count; 700 uint32_t cfg_fcp_eq_count;
701 uint32_t cfg_fcp_io_channel;
698 uint32_t cfg_sg_seg_cnt; 702 uint32_t cfg_sg_seg_cnt;
699 uint32_t cfg_prot_sg_seg_cnt; 703 uint32_t cfg_prot_sg_seg_cnt;
700 uint32_t cfg_sg_dma_buf_size; 704 uint32_t cfg_sg_dma_buf_size;
@@ -732,7 +736,7 @@ struct lpfc_hba {
732 uint32_t hbq_count; /* Count of configured HBQs */ 736 uint32_t hbq_count; /* Count of configured HBQs */
733 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 737 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
734 738
735 uint32_t fcp_qidx; /* next work queue to post work to */ 739 atomic_t fcp_qidx; /* next work queue to post work to */
736 740
737 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 741 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
738 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ 742 unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
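
Turning fcp_qidx into an atomic_t lets every CPU pick the next FCP work queue without taking a lock; the round-robin mode of the new lpfc_fcp_io_sched parameter reduces to an atomic increment folded into the channel count. A runnable C11 sketch using stdatomic in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_CHANNELS 4

static atomic_uint qidx;	/* static storage: starts at zero */

static unsigned int next_channel(void)
{
	/* fetch-and-increment, then fold into the channel range */
	return atomic_fetch_add(&qidx, 1) % NUM_CHANNELS;
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("io %d -> channel %u\n", i, next_channel());
	return 0;
}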
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index adef5bb2100..b032562aa0d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3643,18 +3643,25 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
3643 struct lpfc_hba *phba = vport->phba; 3643 struct lpfc_hba *phba = vport->phba;
3644 int val = 0, i; 3644 int val = 0, i;
3645 3645
3646 /* fcp_imax is only valid for SLI4 */
3647 if (phba->sli_rev != LPFC_SLI_REV4)
3648 return -EINVAL;
3649
3646 /* Sanity check on user data */ 3650 /* Sanity check on user data */
3647 if (!isdigit(buf[0])) 3651 if (!isdigit(buf[0]))
3648 return -EINVAL; 3652 return -EINVAL;
3649 if (sscanf(buf, "%i", &val) != 1) 3653 if (sscanf(buf, "%i", &val) != 1)
3650 return -EINVAL; 3654 return -EINVAL;
3651 3655
3652 /* Value range is [636,651042] */ 3656 /*
3653 if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST) 3657 * Value range for the HBA is [5000,5000000]
3658 * The value for each EQ depends on how many EQs are configured.
3659 */
3660 if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
3654 return -EINVAL; 3661 return -EINVAL;
3655 3662
3656 phba->cfg_fcp_imax = (uint32_t)val; 3663 phba->cfg_fcp_imax = (uint32_t)val;
3657 for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY) 3664 for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
3658 lpfc_modify_fcp_eq_delay(phba, i); 3665 lpfc_modify_fcp_eq_delay(phba, i);
3659 3666
3660 return strlen(buf); 3667 return strlen(buf);
@@ -3662,13 +3669,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
3662 3669
3663/* 3670/*
3664# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second 3671# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
3672# for the HBA.
3665# 3673#
3666# Value range is [636,651042]. Default value is 10000. 3674# Value range is [5,000 to 5,000,000]. Default value is 50,000.
3667*/ 3675*/
3668static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX; 3676static int lpfc_fcp_imax = LPFC_DEF_IMAX;
3669module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR); 3677module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
3670MODULE_PARM_DESC(lpfc_fcp_imax, 3678MODULE_PARM_DESC(lpfc_fcp_imax,
3671 "Set the maximum number of fast-path FCP interrupts per second"); 3679 "Set the maximum number of FCP interrupts per second per HBA");
3672lpfc_param_show(fcp_imax) 3680lpfc_param_show(fcp_imax)
3673 3681
3674/** 3682/**
@@ -3687,14 +3695,19 @@ lpfc_param_show(fcp_imax)
3687static int 3695static int
3688lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) 3696lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
3689{ 3697{
3690 if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) { 3698 if (phba->sli_rev != LPFC_SLI_REV4) {
3699 phba->cfg_fcp_imax = 0;
3700 return 0;
3701 }
3702
3703 if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
3691 phba->cfg_fcp_imax = val; 3704 phba->cfg_fcp_imax = val;
3692 return 0; 3705 return 0;
3693 } 3706 }
3694 3707
3695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3708 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3696 "3016 fcp_imax: %d out of range, using default\n", val); 3709 "3016 fcp_imax: %d out of range, using default\n", val);
3697 phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX; 3710 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
3698 3711
3699 return 0; 3712 return 0;
3700} 3713}
@@ -3765,6 +3778,16 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
3765LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); 3778LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
3766 3779
3767/* 3780/*
3781# lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
3782# Value range is [0,1]. Default value is 0.
3783# For [0], FCP commands are issued to Work Queues in a round robin fashion.
3784# For [1], FCP commands are issued to a Work Queue associated with the
3785# current CPU.
3786*/
3787LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
3788 "issuing commands [0] - Round Robin, [1] - Current CPU");
3789
3790/*
3768# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing 3791# lpfc_cr_delay & lpfc_cr_count: Default values for I/O colaesing
3769# cr_delay (msec) or cr_count outstanding commands. cr_delay can take 3792# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
3770# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay 3793# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
@@ -3844,21 +3867,33 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
3844 3867
3845/* 3868/*
3846# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues 3869# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
3870# This parameter is ignored and will eventually be deprecated
3847# 3871#
3848# Value range is [1,31]. Default value is 4. 3872# Value range is [1,7]. Default value is 4.
3849*/ 3873*/
3850LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, 3874LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
3875 LPFC_FCP_IO_CHAN_MAX,
3851 "Set the number of fast-path FCP work queues, if possible"); 3876 "Set the number of fast-path FCP work queues, if possible");
3852 3877
3853/* 3878/*
3854# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues 3879# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
3855# 3880#
3856# Value range is [1,7]. Default value is 1. 3881# Value range is [1,7]. Default value is 4.
3857*/ 3882*/
3858LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, 3883LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
3884 LPFC_FCP_IO_CHAN_MAX,
3859 "Set the number of fast-path FCP event queues, if possible"); 3885 "Set the number of fast-path FCP event queues, if possible");
3860 3886
3861/* 3887/*
3888# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
3889#
3890# Value range is [1,7]. Default value is 4.
3891*/
3892LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
3893 LPFC_FCP_IO_CHAN_MAX,
3894 "Set the number of FCP I/O channels");
3895
3896/*
3862# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 3897# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
3863# 0 = HBA resets disabled 3898# 0 = HBA resets disabled
3864# 1 = HBA resets enabled (default) 3899# 1 = HBA resets enabled (default)
@@ -3883,6 +3918,17 @@ LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
3883LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); 3918LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
3884 3919
3885/* 3920/*
3921# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
3922# 0 = disabled (default)
3923# 1 = enabled
3924# Value range is [0,1]. Default value is 0.
3925*/
3926unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
3927
3928module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
3929MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
3930
3931/*
3886# lpfc_prot_mask: i 3932# lpfc_prot_mask: i
3887# - Bit mask of host protection capabilities used to register with the 3933# - Bit mask of host protection capabilities used to register with the
3888# SCSI mid-layer 3934# SCSI mid-layer
@@ -3976,6 +4022,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
3976 &dev_attr_lpfc_topology, 4022 &dev_attr_lpfc_topology,
3977 &dev_attr_lpfc_scan_down, 4023 &dev_attr_lpfc_scan_down,
3978 &dev_attr_lpfc_link_speed, 4024 &dev_attr_lpfc_link_speed,
4025 &dev_attr_lpfc_fcp_io_sched,
3979 &dev_attr_lpfc_cr_delay, 4026 &dev_attr_lpfc_cr_delay,
3980 &dev_attr_lpfc_cr_count, 4027 &dev_attr_lpfc_cr_count,
3981 &dev_attr_lpfc_multi_ring_support, 4028 &dev_attr_lpfc_multi_ring_support,
@@ -4002,6 +4049,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
4002 &dev_attr_lpfc_fcp_imax, 4049 &dev_attr_lpfc_fcp_imax,
4003 &dev_attr_lpfc_fcp_wq_count, 4050 &dev_attr_lpfc_fcp_wq_count,
4004 &dev_attr_lpfc_fcp_eq_count, 4051 &dev_attr_lpfc_fcp_eq_count,
4052 &dev_attr_lpfc_fcp_io_channel,
4005 &dev_attr_lpfc_enable_bg, 4053 &dev_attr_lpfc_enable_bg,
4006 &dev_attr_lpfc_soft_wwnn, 4054 &dev_attr_lpfc_soft_wwnn,
4007 &dev_attr_lpfc_soft_wwpn, 4055 &dev_attr_lpfc_soft_wwpn,
@@ -4964,6 +5012,7 @@ struct fc_function_template lpfc_vport_transport_functions = {
4964void 5012void
4965lpfc_get_cfgparam(struct lpfc_hba *phba) 5013lpfc_get_cfgparam(struct lpfc_hba *phba)
4966{ 5014{
5015 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
4967 lpfc_cr_delay_init(phba, lpfc_cr_delay); 5016 lpfc_cr_delay_init(phba, lpfc_cr_delay);
4968 lpfc_cr_count_init(phba, lpfc_cr_count); 5017 lpfc_cr_count_init(phba, lpfc_cr_count);
4969 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support); 5018 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -4980,6 +5029,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
4980 lpfc_fcp_imax_init(phba, lpfc_fcp_imax); 5029 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
4981 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); 5030 lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
4982 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); 5031 lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
5032 lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
4983 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); 5033 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
4984 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); 5034 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
4985 lpfc_enable_bg_init(phba, lpfc_enable_bg); 5035 lpfc_enable_bg_init(phba, lpfc_enable_bg);
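
Each lpfc_<param>_init() helper above validates the module parameter against its documented range and silently substitutes the default otherwise, as the reworked lpfc_fcp_imax_init() shows. A runnable sketch of that validate-or-default shape; the MIN/MAX/DEF limits here are invented stand-ins for the driver's macros.

#include <stdio.h>

#define MY_MIN_IMAX 5000
#define MY_MAX_IMAX 5000000
#define MY_DEF_IMAX 50000

static unsigned int imax_init(int val)
{
	if (val >= MY_MIN_IMAX && val <= MY_MAX_IMAX)
		return (unsigned int)val;

	fprintf(stderr, "fcp_imax: %d out of range, using default\n", val);
	return MY_DEF_IMAX;
}

int main(void)
{
	printf("%u\n", imax_init(20000));	/* in range: kept */
	printf("%u\n", imax_init(42));		/* too small: default */
	return 0;
}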
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 253d9a85734..f7368eb8041 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -195,7 +195,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
195 195
196 if (rsp->ulpStatus) { 196 if (rsp->ulpStatus) {
197 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 197 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
198 switch (rsp->un.ulpWord[4] & 0xff) { 198 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
199 case IOERR_SEQUENCE_TIMEOUT: 199 case IOERR_SEQUENCE_TIMEOUT:
200 rc = -ETIMEDOUT; 200 rc = -ETIMEDOUT;
201 break; 201 break;
@@ -1234,7 +1234,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1234 1234
1235 if (rsp->ulpStatus) { 1235 if (rsp->ulpStatus) {
1236 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 1236 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1237 switch (rsp->un.ulpWord[4] & 0xff) { 1237 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1238 case IOERR_SEQUENCE_TIMEOUT: 1238 case IOERR_SEQUENCE_TIMEOUT:
1239 rc = -ETIMEDOUT; 1239 rc = -ETIMEDOUT;
1240 break; 1240 break;
@@ -1714,6 +1714,8 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1714 phba->sli4_hba.lnk_info.lnk_no); 1714 phba->sli4_hba.lnk_info.lnk_no);
1715 1715
1716 link_diag_state = &pmboxq->u.mqe.un.link_diag_state; 1716 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1717 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1718 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1717 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req, 1719 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1718 phba->sli4_hba.lnk_info.lnk_no); 1720 phba->sli4_hba.lnk_info.lnk_no);
1719 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req, 1721 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
@@ -4796,7 +4798,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4796 menlo_resp->xri = rsp->ulpContext; 4798 menlo_resp->xri = rsp->ulpContext;
4797 if (rsp->ulpStatus) { 4799 if (rsp->ulpStatus) {
4798 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 4800 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4799 switch (rsp->un.ulpWord[4] & 0xff) { 4801 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4800 case IOERR_SEQUENCE_TIMEOUT: 4802 case IOERR_SEQUENCE_TIMEOUT:
4801 rc = -ETIMEDOUT; 4803 rc = -ETIMEDOUT;
4802 break; 4804 break;
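
The recurring lpfc change above masks ulpWord[4] with IOERR_PARAM_MASK before comparing it, since only the low bits of the word carry the error code while the upper bits carry parameters. A runnable demo of the extraction; the 0xff mask and the IOERR_SEQUENCE_TIMEOUT value are assumptions standing in for the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define IOERR_PARAM_MASK 0xff		/* assumed low-byte error field */
#define IOERR_SEQUENCE_TIMEOUT 0x0b	/* illustrative value */

int main(void)
{
	uint32_t word4 = 0xdead0000 | IOERR_SEQUENCE_TIMEOUT;

	switch (word4 & IOERR_PARAM_MASK) {	/* never compare raw word4 */
	case IOERR_SEQUENCE_TIMEOUT:
		puts("sequence timeout");
		break;
	default:
		puts("other");
	}
	return 0;
}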
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8a2a514a255..e470c489de0 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -196,8 +196,7 @@ irqreturn_t lpfc_sli_intr_handler(int, void *);
196irqreturn_t lpfc_sli_sp_intr_handler(int, void *); 196irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
197irqreturn_t lpfc_sli_fp_intr_handler(int, void *); 197irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
198irqreturn_t lpfc_sli4_intr_handler(int, void *); 198irqreturn_t lpfc_sli4_intr_handler(int, void *);
199irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); 199irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
200irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
201 200
202void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 201void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
203void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); 202void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -391,6 +390,7 @@ extern spinlock_t pgcnt_lock;
391extern unsigned int pgcnt; 390extern unsigned int pgcnt;
392extern unsigned int lpfc_prot_mask; 391extern unsigned int lpfc_prot_mask;
393extern unsigned char lpfc_prot_guard; 392extern unsigned char lpfc_prot_guard;
393extern unsigned int lpfc_fcp_look_ahead;
394 394
395/* Interface exported by fabric iocb scheduler */ 395/* Interface exported by fabric iocb scheduler */
396void lpfc_fabric_abort_nport(struct lpfc_nodelist *); 396void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
@@ -457,6 +457,8 @@ int lpfc_sli4_queue_create(struct lpfc_hba *);
457void lpfc_sli4_queue_destroy(struct lpfc_hba *); 457void lpfc_sli4_queue_destroy(struct lpfc_hba *);
458void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *, 458void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
459 struct sli4_wcqe_xri_aborted *); 459 struct sli4_wcqe_xri_aborted *);
460void lpfc_sli_abts_recover_port(struct lpfc_vport *,
461 struct lpfc_nodelist *);
460int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t); 462int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
461int lpfc_issue_reg_vfi(struct lpfc_vport *); 463int lpfc_issue_reg_vfi(struct lpfc_vport *);
462int lpfc_issue_unreg_vfi(struct lpfc_vport *); 464int lpfc_issue_unreg_vfi(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 93e96b3c909..7ffabb7e3af 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -104,7 +104,8 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
104 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) { 104 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
105 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 105 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
106 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 106 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
107 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { 107 ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
108 IOERR_RCV_BUFFER_WAITING)) {
108 /* Not enough posted buffers; Try posting more buffers */ 109 /* Not enough posted buffers; Try posting more buffers */
109 phba->fc_stat.NoRcvBuf++; 110 phba->fc_stat.NoRcvBuf++;
110 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 111 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -633,7 +634,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
633 /* Check for retry */ 634 /* Check for retry */
634 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 635 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
635 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || 636 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
636 irsp->un.ulpWord[4] != IOERR_NO_RESOURCES) 637 (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
638 IOERR_NO_RESOURCES)
637 vport->fc_ns_retry++; 639 vport->fc_ns_retry++;
638 640
639 /* CT command is being retried */ 641 /* CT command is being retried */
@@ -783,7 +785,9 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
783 if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { 785 if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
784 retry = 1; 786 retry = 1;
785 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 787 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
786 switch (irsp->un.ulpWord[4]) { 788 switch ((irsp->un.ulpWord[4] &
789 IOERR_PARAM_MASK)) {
790
787 case IOERR_NO_RESOURCES: 791 case IOERR_NO_RESOURCES:
788 /* We don't increment the retry 792 /* We don't increment the retry
789 * count for this case. 793 * count for this case.
@@ -908,8 +912,10 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
908 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); 912 cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
909 913
910 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 914 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
911 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) || 915 (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
912 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) 916 IOERR_SLI_DOWN) ||
917 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
918 IOERR_SLI_ABORTED)))
913 goto out; 919 goto out;
914 920
915 retry = cmdiocb->retry; 921 retry = cmdiocb->retry;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3217d63ed28..f63f5ff7f27 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -490,9 +490,11 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
490 len += snprintf(buf+len, size-len, 490 len += snprintf(buf+len, size-len,
491 "Ring %d: CMD GetInx:%d (Max:%d Next:%d " 491 "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
492 "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n", 492 "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
493 i, pgpp->cmdGetInx, pring->numCiocb, 493 i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
494 pring->next_cmdidx, pring->local_getidx, 494 pring->sli.sli3.next_cmdidx,
495 pring->flag, pgpp->rspPutInx, pring->numRiocb); 495 pring->sli.sli3.local_getidx,
496 pring->flag, pgpp->rspPutInx,
497 pring->sli.sli3.numRiocb);
496 } 498 }
497 499
498 if (phba->sli_rev <= LPFC_SLI_REV3) { 500 if (phba->sli_rev <= LPFC_SLI_REV3) {
@@ -557,6 +559,9 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
557 case NLP_STE_PRLI_ISSUE: 559 case NLP_STE_PRLI_ISSUE:
558 statep = "PRLI "; 560 statep = "PRLI ";
559 break; 561 break;
562 case NLP_STE_LOGO_ISSUE:
563 statep = "LOGO ";
564 break;
560 case NLP_STE_UNMAPPED_NODE: 565 case NLP_STE_UNMAPPED_NODE:
561 statep = "UNMAP "; 566 statep = "UNMAP ";
562 break; 567 break;
@@ -581,8 +586,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
581 "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ", 586 "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
582 *name, *(name+1), *(name+2), *(name+3), 587 *name, *(name+1), *(name+2), *(name+3),
583 *(name+4), *(name+5), *(name+6), *(name+7)); 588 *(name+4), *(name+5), *(name+6), *(name+7));
584 len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ", 589 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
585 ndlp->nlp_rpi, ndlp->nlp_flag); 590 len += snprintf(buf+len, size-len, "RPI:%03d ",
591 ndlp->nlp_rpi);
592 else
593 len += snprintf(buf+len, size-len, "RPI:none ");
594 len += snprintf(buf+len, size-len, "flag:x%08x ",
595 ndlp->nlp_flag);
586 if (!ndlp->nlp_type) 596 if (!ndlp->nlp_type)
587 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE "); 597 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
588 if (ndlp->nlp_type & NLP_FC_NODE) 598 if (ndlp->nlp_type & NLP_FC_NODE)
@@ -1999,207 +2009,298 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
1999{ 2009{
2000 struct lpfc_debug *debug = file->private_data; 2010 struct lpfc_debug *debug = file->private_data;
2001 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; 2011 struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
2002 int len = 0, fcp_qidx; 2012 int len = 0;
2003 char *pbuffer; 2013 char *pbuffer;
2014 int x, cnt;
2015 int max_cnt;
2016 struct lpfc_queue *qp = NULL;
2017
2004 2018
2005 if (!debug->buffer) 2019 if (!debug->buffer)
2006 debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); 2020 debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
2007 if (!debug->buffer) 2021 if (!debug->buffer)
2008 return 0; 2022 return 0;
2009 pbuffer = debug->buffer; 2023 pbuffer = debug->buffer;
2024 max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
2010 2025
2011 if (*ppos) 2026 if (*ppos)
2012 return 0; 2027 return 0;
2013 2028
2014 /* Get slow-path event queue information */ 2029 spin_lock_irq(&phba->hbalock);
2015 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2030
2016 "Slow-path EQ information:\n"); 2031 /* Fast-path event queue */
2017 if (phba->sli4_hba.sp_eq) { 2032 if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
2018 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2033 cnt = phba->cfg_fcp_io_channel;
2019 "\tEQID[%02d], " 2034
2020 "QE-COUNT[%04d], QE-SIZE[%04d], " 2035 for (x = 0; x < cnt; x++) {
2021 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n", 2036
2022 phba->sli4_hba.sp_eq->queue_id, 2037 /* Fast-path EQ */
2023 phba->sli4_hba.sp_eq->entry_count, 2038 qp = phba->sli4_hba.hba_eq[x];
2024 phba->sli4_hba.sp_eq->entry_size, 2039 if (!qp)
2025 phba->sli4_hba.sp_eq->host_index, 2040 goto proc_cq;
2026 phba->sli4_hba.sp_eq->hba_index); 2041
2027 } 2042 len += snprintf(pbuffer+len,
2043 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2044 "\nHBA EQ info: "
2045 "EQ-STAT[max:x%x noE:x%x "
2046 "bs:x%x proc:x%llx]\n",
2047 qp->q_cnt_1, qp->q_cnt_2,
2048 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
2049
2050 len += snprintf(pbuffer+len,
2051 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2052 "EQID[%02d], "
2053 "QE-CNT[%04d], QE-SIZE[%04d], "
2054 "HOST-IDX[%04d], PORT-IDX[%04d]",
2055 qp->queue_id,
2056 qp->entry_count,
2057 qp->entry_size,
2058 qp->host_index,
2059 qp->hba_index);
2060
2061
2062 /* Reset max counter */
2063 qp->EQ_max_eqe = 0;
2064
2065 len += snprintf(pbuffer+len,
2066 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2067 if (len >= max_cnt)
2068 goto too_big;
2069proc_cq:
2070 /* Fast-path FCP CQ */
2071 qp = phba->sli4_hba.fcp_cq[x];
2072 len += snprintf(pbuffer+len,
2073 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2074 "\tFCP CQ info: ");
2075 len += snprintf(pbuffer+len,
2076 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2077 "AssocEQID[%02d]: "
2078 "CQ STAT[max:x%x relw:x%x "
2079 "xabt:x%x wq:x%llx]\n",
2080 qp->assoc_qid,
2081 qp->q_cnt_1, qp->q_cnt_2,
2082 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
2083 len += snprintf(pbuffer+len,
2084 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2085 "\tCQID[%02d], "
2086 "QE-CNT[%04d], QE-SIZE[%04d], "
2087 "HOST-IDX[%04d], PORT-IDX[%04d]",
2088 qp->queue_id, qp->entry_count,
2089 qp->entry_size, qp->host_index,
2090 qp->hba_index);
2091
2028 2092
2029 /* Get fast-path event queue information */ 2093 /* Reset max counter */
2030 len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, 2094 qp->CQ_max_cqe = 0;
2031 "Fast-path EQ information:\n"); 2095
2032 if (phba->sli4_hba.fp_eq) { 2096 len += snprintf(pbuffer+len,
2033 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 2097 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2034 fcp_qidx++) { 2098 if (len >= max_cnt)
2035 if (phba->sli4_hba.fp_eq[fcp_qidx]) { 2099 goto too_big;
2100
2101 /* Fast-path FCP WQ */
2102 qp = phba->sli4_hba.fcp_wq[x];
2103
2104 len += snprintf(pbuffer+len,
2105 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2106 "\t\tFCP WQ info: ");
2107 len += snprintf(pbuffer+len,
2108 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2109 "AssocCQID[%02d]: "
2110 "WQ-STAT[oflow:x%x posted:x%llx]\n",
2111 qp->assoc_qid,
2112 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
2113 len += snprintf(pbuffer+len,
2114 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2115 "\t\tWQID[%02d], "
2116 "QE-CNT[%04d], QE-SIZE[%04d], "
2117 "HOST-IDX[%04d], PORT-IDX[%04d]",
2118 qp->queue_id,
2119 qp->entry_count,
2120 qp->entry_size,
2121 qp->host_index,
2122 qp->hba_index);
2123
2124 len += snprintf(pbuffer+len,
2125 LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
2126 if (len >= max_cnt)
2127 goto too_big;
2128
2129 if (x)
2130 continue;
2131
2132 /* Only EQ 0 has slow path CQs configured */
2133
2134 /* Slow-path mailbox CQ */
2135 qp = phba->sli4_hba.mbx_cq;
2136 if (qp) {
2137 len += snprintf(pbuffer+len,
2138 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2139 "\tMBX CQ info: ");
2140 len += snprintf(pbuffer+len,
2141 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2142 "AssocEQID[%02d]: "
2143 "CQ-STAT[mbox:x%x relw:x%x "
2144 "xabt:x%x wq:x%llx]\n",
2145 qp->assoc_qid,
2146 qp->q_cnt_1, qp->q_cnt_2,
2147 qp->q_cnt_3,
2148 (unsigned long long)qp->q_cnt_4);
2036 len += snprintf(pbuffer+len, 2149 len += snprintf(pbuffer+len,
2037 LPFC_QUE_INFO_GET_BUF_SIZE-len, 2150 LPFC_QUE_INFO_GET_BUF_SIZE-len,
2038 "\tEQID[%02d], " 2151 "\tCQID[%02d], "
2039 "QE-COUNT[%04d], QE-SIZE[%04d], " 2152 "QE-CNT[%04d], QE-SIZE[%04d], "
2040 "HOST-INDEX[%04d], PORT-INDEX[%04d]\n", 2153 "HOST-IDX[%04d], PORT-IDX[%04d]",
-				phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
-				phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
-				phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
-				phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
-				phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+				qp->queue_id, qp->entry_count,
+				qp->entry_size, qp->host_index,
+				qp->hba_index);
+
+			len += snprintf(pbuffer+len,
+				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+			if (len >= max_cnt)
+				goto too_big;
 			}
-		}
-	}
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-
-	/* Get mailbox complete queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path MBX CQ information:\n");
-	if (phba->sli4_hba.mbx_cq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated EQID[%02d]:\n",
-			phba->sli4_hba.mbx_cq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tCQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.mbx_cq->queue_id,
-			phba->sli4_hba.mbx_cq->entry_count,
-			phba->sli4_hba.mbx_cq->entry_size,
-			phba->sli4_hba.mbx_cq->host_index,
-			phba->sli4_hba.mbx_cq->hba_index);
-	}
 
-	/* Get slow-path complete queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path ELS CQ information:\n");
-	if (phba->sli4_hba.els_cq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated EQID[%02d]:\n",
-			phba->sli4_hba.els_cq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tCQID [%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.els_cq->queue_id,
-			phba->sli4_hba.els_cq->entry_count,
-			phba->sli4_hba.els_cq->entry_size,
-			phba->sli4_hba.els_cq->host_index,
-			phba->sli4_hba.els_cq->hba_index);
-	}
+	/* Slow-path MBOX MQ */
+	qp = phba->sli4_hba.mbx_wq;
+	if (qp) {
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tMBX MQ info: ");
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"AssocCQID[%02d]:\n",
+			phba->sli4_hba.mbx_wq->assoc_qid);
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tWQID[%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			qp->queue_id, qp->entry_count,
+			qp->entry_size, qp->host_index,
+			qp->hba_index);
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+		if (len >= max_cnt)
+			goto too_big;
+	}
 
-	/* Get fast-path complete queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Fast-path FCP CQ information:\n");
-	fcp_qidx = 0;
-	if (phba->sli4_hba.fcp_cq) {
-		do {
-			if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
-				len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"Associated EQID[%02d]:\n",
-					phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
-				len += snprintf(pbuffer+len,
-					LPFC_QUE_INFO_GET_BUF_SIZE-len,
-					"\tCQID[%02d], "
-					"QE-COUNT[%04d], QE-SIZE[%04d], "
-					"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-					phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
-					phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
-			}
-		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
-		len += snprintf(pbuffer+len,
-				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
-	}
+	/* Slow-path ELS response CQ */
+	qp = phba->sli4_hba.els_cq;
+	if (qp) {
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tELS CQ info: ");
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"AssocEQID[%02d]: "
+			"CQ-STAT[max:x%x relw:x%x "
+			"xabt:x%x wq:x%llx]\n",
+			qp->assoc_qid,
+			qp->q_cnt_1, qp->q_cnt_2,
+			qp->q_cnt_3,
+			(unsigned long long)qp->q_cnt_4);
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\tCQID [%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			qp->queue_id, qp->entry_count,
+			qp->entry_size, qp->host_index,
+			qp->hba_index);
+
+		/* Reset max counter */
+		qp->CQ_max_cqe = 0;
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+		if (len >= max_cnt)
+			goto too_big;
+	}
 
-	/* Get mailbox queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path MBX MQ information:\n");
-	if (phba->sli4_hba.mbx_wq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated CQID[%02d]:\n",
-			phba->sli4_hba.mbx_wq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tWQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.mbx_wq->queue_id,
-			phba->sli4_hba.mbx_wq->entry_count,
-			phba->sli4_hba.mbx_wq->entry_size,
-			phba->sli4_hba.mbx_wq->host_index,
-			phba->sli4_hba.mbx_wq->hba_index);
-	}
+	/* Slow-path ELS WQ */
+	qp = phba->sli4_hba.els_wq;
+	if (qp) {
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tELS WQ info: ");
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"AssocCQID[%02d]: "
+			" WQ-STAT[oflow:x%x "
+			"posted:x%llx]\n",
+			qp->assoc_qid,
+			qp->q_cnt_1,
+			(unsigned long long)qp->q_cnt_4);
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tWQID[%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]",
+			qp->queue_id, qp->entry_count,
+			qp->entry_size, qp->host_index,
+			qp->hba_index);
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+		if (len >= max_cnt)
+			goto too_big;
+	}
 
-	/* Get slow-path work queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path ELS WQ information:\n");
-	if (phba->sli4_hba.els_wq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated CQID[%02d]:\n",
-			phba->sli4_hba.els_wq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tWQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
-			phba->sli4_hba.els_wq->queue_id,
-			phba->sli4_hba.els_wq->entry_count,
-			phba->sli4_hba.els_wq->entry_size,
-			phba->sli4_hba.els_wq->host_index,
-			phba->sli4_hba.els_wq->hba_index);
-	}
+	if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+		/* Slow-path RQ header */
+		qp = phba->sli4_hba.hdr_rq;
 
-	/* Get fast-path work queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Fast-path FCP WQ information:\n");
-	if (phba->sli4_hba.fcp_wq) {
-		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
-		     fcp_qidx++) {
-			if (!phba->sli4_hba.fcp_wq[fcp_qidx])
-				continue;
-			len += snprintf(pbuffer+len,
-				LPFC_QUE_INFO_GET_BUF_SIZE-len,
-				"Associated CQID[%02d]:\n",
-				phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
-			len += snprintf(pbuffer+len,
-				LPFC_QUE_INFO_GET_BUF_SIZE-len,
-				"\tWQID[%02d], "
-				"QE-COUNT[%04d], WQE-SIZE[%04d], "
-				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-				phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
-				phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
-				phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
-				phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
-				phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tRQ info: ");
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"AssocCQID[%02d]: "
+			"RQ-STAT[nopost:x%x nobuf:x%x "
+			"trunc:x%x rcv:x%llx]\n",
+			qp->assoc_qid,
+			qp->q_cnt_1, qp->q_cnt_2,
+			qp->q_cnt_3,
+			(unsigned long long)qp->q_cnt_4);
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tHQID[%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]\n",
+			qp->queue_id,
+			qp->entry_count,
+			qp->entry_size,
+			qp->host_index,
+			qp->hba_index);
+
+		/* Slow-path RQ data */
+		qp = phba->sli4_hba.dat_rq;
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len,
+			"\t\tDQID[%02d], "
+			"QE-CNT[%04d], QE-SIZE[%04d], "
+			"HOST-IDX[%04d], PORT-IDX[%04d]\n",
+			qp->queue_id,
+			qp->entry_count,
+			qp->entry_size,
+			qp->host_index,
+			qp->hba_index);
+
+		len += snprintf(pbuffer+len,
+			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+	}
 		}
-		len += snprintf(pbuffer+len,
-				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
 	}
 
-	/* Get receive queue information */
-	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Slow-path RQ information:\n");
-	if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"Associated CQID[%02d]:\n",
-			phba->sli4_hba.hdr_rq->assoc_qid);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tHQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-			phba->sli4_hba.hdr_rq->queue_id,
-			phba->sli4_hba.hdr_rq->entry_count,
-			phba->sli4_hba.hdr_rq->entry_size,
-			phba->sli4_hba.hdr_rq->host_index,
-			phba->sli4_hba.hdr_rq->hba_index);
-		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
-			"\tDQID[%02d], "
-			"QE-COUNT[%04d], QE-SIZE[%04d], "
-			"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
-			phba->sli4_hba.dat_rq->queue_id,
-			phba->sli4_hba.dat_rq->entry_count,
-			phba->sli4_hba.dat_rq->entry_size,
-			phba->sli4_hba.dat_rq->host_index,
-			phba->sli4_hba.dat_rq->hba_index);
-	}
+	spin_unlock_irq(&phba->hbalock);
+	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+too_big:
+	len += snprintf(pbuffer+len,
+		LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
+	spin_unlock_irq(&phba->hbalock);
 	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
 }
 
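Editor's note: the rewritten dump above pairs every snprintf() append with a length check against max_cnt and funnels overflow into a single too_big exit that stamps a truncation marker before the buffer is returned. The following standalone C sketch illustrates that bounded-append pattern; the buffer size, queue fields, and max_cnt value are made-up stand-ins, not the driver's real ones.

/*
 * Minimal sketch of the bounded-append pattern: append, check the
 * running length, and finish through one shared truncation path.
 * All names and sizes here are illustrative.
 */
#include <stdio.h>

#define BUF_SIZE 4096

struct fake_queue { int queue_id, entry_count, entry_size; };

static int dump_queues(char *pbuffer, int max_cnt,
		       const struct fake_queue *qs, int nq)
{
	int len = 0, i;

	for (i = 0; i < nq; i++) {
		len += snprintf(pbuffer + len, BUF_SIZE - len,
				"QID[%02d] QE-CNT[%04d] QE-SIZE[%04d]\n",
				qs[i].queue_id, qs[i].entry_count,
				qs[i].entry_size);
		if (len >= max_cnt)	/* same check as the patch */
			goto too_big;
	}
	return len;

too_big:
	/* one shared exit marks the output as incomplete */
	len += snprintf(pbuffer + len, BUF_SIZE - len, "Truncated ...\n");
	return len;
}

int main(void)
{
	struct fake_queue qs[] = { {1, 256, 64}, {2, 1024, 128} };
	char buf[BUF_SIZE];

	/* a deliberately small max_cnt (40) forces the truncation path */
	printf("%.*s", dump_queues(buf, 40, qs, 2), buf);
	return 0;
}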
@@ -2408,31 +2509,21 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 
 	switch (quetp) {
 	case LPFC_IDIAG_EQ:
-		/* Slow-path event queue */
-		if (phba->sli4_hba.sp_eq &&
-		    phba->sli4_hba.sp_eq->queue_id == queid) {
-			/* Sanity check */
-			rc = lpfc_idiag_que_param_check(
-					phba->sli4_hba.sp_eq, index, count);
-			if (rc)
-				goto error_out;
-			idiag.ptr_private = phba->sli4_hba.sp_eq;
-			goto pass_check;
-		}
-		/* Fast-path event queue */
-		if (phba->sli4_hba.fp_eq) {
-			for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
-				if (phba->sli4_hba.fp_eq[qidx] &&
-				    phba->sli4_hba.fp_eq[qidx]->queue_id ==
+		/* HBA event queue */
+		if (phba->sli4_hba.hba_eq) {
+			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+			     qidx++) {
+				if (phba->sli4_hba.hba_eq[qidx] &&
+				    phba->sli4_hba.hba_eq[qidx]->queue_id ==
 				    queid) {
 					/* Sanity check */
 					rc = lpfc_idiag_que_param_check(
-						phba->sli4_hba.fp_eq[qidx],
+						phba->sli4_hba.hba_eq[qidx],
 						index, count);
 					if (rc)
 						goto error_out;
 					idiag.ptr_private =
-						phba->sli4_hba.fp_eq[qidx];
+						phba->sli4_hba.hba_eq[qidx];
 					goto pass_check;
 				}
 			}
@@ -2479,7 +2570,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 					phba->sli4_hba.fcp_cq[qidx];
 				goto pass_check;
 			}
-		} while (++qidx < phba->cfg_fcp_eq_count);
+		} while (++qidx < phba->cfg_fcp_io_channel);
 	}
 	goto error_out;
 	break;
@@ -2511,7 +2602,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 		}
 		/* FCP work queue */
 		if (phba->sli4_hba.fcp_wq) {
-			for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+			     qidx++) {
 				if (!phba->sli4_hba.fcp_wq[qidx])
 					continue;
 				if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
@@ -4490,7 +4582,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
 	lpfc_debug_dump_mbx_wq(phba);
 	lpfc_debug_dump_els_wq(phba);
 
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
 		lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
 
 	lpfc_debug_dump_hdr_rq(phba);
@@ -4501,14 +4593,12 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
 	lpfc_debug_dump_mbx_cq(phba);
 	lpfc_debug_dump_els_cq(phba);
 
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
 		lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
 
 	/*
 	 * Dump Event Queues (EQs)
 	 */
-	lpfc_debug_dump_sp_eq(phba);
-
-	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++)
-		lpfc_debug_dump_fcp_eq(phba, fcp_wqidx);
+	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
+		lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
 }
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index afe368fd1b9..8b2b6a3bfc2 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -36,6 +36,9 @@
 /* dumpHostSlim output buffer size */
 #define LPFC_DUMPHOSTSLIM_SIZE 4096
 
+/* dumpSLIqinfo output buffer size */
+#define LPFC_DUMPSLIQINFO_SIZE 4096
+
 /* hbqinfo output buffer size */
 #define LPFC_HBQINFO_SIZE 8192
 
@@ -366,7 +369,7 @@ static inline void
 lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
 {
 	/* sanity check */
-	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+	if (fcp_wqidx >= phba->cfg_fcp_io_channel)
 		return;
 
 	printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
@@ -388,15 +391,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
 	int fcp_cqidx, fcp_cqid;
 
 	/* sanity check */
-	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+	if (fcp_wqidx >= phba->cfg_fcp_io_channel)
 		return;
 
 	fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
-	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
 		if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
 			break;
 	if (phba->intr_type == MSIX) {
-		if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+		if (fcp_cqidx >= phba->cfg_fcp_io_channel)
 			return;
 	} else {
 		if (fcp_cqidx > 0)
@@ -410,7 +413,7 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
 }
 
 /**
- * lpfc_debug_dump_fcp_eq - dump all entries from a fcp work queue's evt queue
+ * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
  * @phba: Pointer to HBA context object.
  * @fcp_wqidx: Index to a FCP work queue.
  *
@@ -418,36 +421,30 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
  * associated to the FCP work queue specified by the @fcp_wqidx.
  **/
 static inline void
-lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
 {
 	struct lpfc_queue *qdesc;
 	int fcp_eqidx, fcp_eqid;
 	int fcp_cqidx, fcp_cqid;
 
 	/* sanity check */
-	if (fcp_wqidx >= phba->cfg_fcp_wq_count)
+	if (fcp_wqidx >= phba->cfg_fcp_io_channel)
 		return;
 	fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
-	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
+	for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
 		if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
 			break;
 	if (phba->intr_type == MSIX) {
-		if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+		if (fcp_cqidx >= phba->cfg_fcp_io_channel)
 			return;
 	} else {
 		if (fcp_cqidx > 0)
 			return;
 	}
 
-	if (phba->cfg_fcp_eq_count == 0) {
-		fcp_eqidx = -1;
-		fcp_eqid = phba->sli4_hba.sp_eq->queue_id;
-		qdesc = phba->sli4_hba.sp_eq;
-	} else {
-		fcp_eqidx = fcp_cqidx;
-		fcp_eqid = phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id;
-		qdesc = phba->sli4_hba.fp_eq[fcp_eqidx];
-	}
+	fcp_eqidx = fcp_cqidx;
+	fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
+	qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
 
 	printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
 		"EQ[Idx:%d|Qid:%d]\n",
@@ -543,25 +540,6 @@ lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_debug_dump_sp_eq - dump all entries from slow-path event queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the slow-path event queue.
- **/
-static inline void
-lpfc_debug_dump_sp_eq(struct lpfc_hba *phba)
-{
-	printk(KERN_ERR "SP EQ: WQ[Qid:%d/Qid:%d]->CQ[Qid:%d/Qid:%d]->"
-		"EQ[Qid:%d]:\n",
-		phba->sli4_hba.mbx_wq->queue_id,
-		phba->sli4_hba.els_wq->queue_id,
-		phba->sli4_hba.mbx_cq->queue_id,
-		phba->sli4_hba.els_cq->queue_id,
-		phba->sli4_hba.sp_eq->queue_id);
-	lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-}
-
-/**
  * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
  * @phba: Pointer to HBA context object.
  * @qid: Work queue identifier.
@@ -574,10 +552,10 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
 {
 	int wq_idx;
 
-	for (wq_idx = 0; wq_idx < phba->cfg_fcp_wq_count; wq_idx++)
+	for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++)
 		if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
 			break;
-	if (wq_idx < phba->cfg_fcp_wq_count) {
+	if (wq_idx < phba->cfg_fcp_io_channel) {
 		printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
 		lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
 		return;
@@ -644,9 +622,9 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
 	do {
 		if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
 			break;
-	} while (++cq_idx < phba->cfg_fcp_eq_count);
+	} while (++cq_idx < phba->cfg_fcp_io_channel);
 
-	if (cq_idx < phba->cfg_fcp_eq_count) {
+	if (cq_idx < phba->cfg_fcp_io_channel) {
 		printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
 		lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
 		return;
@@ -677,21 +655,17 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
 {
 	int eq_idx;
 
-	for (eq_idx = 0; eq_idx < phba->cfg_fcp_eq_count; eq_idx++) {
-		if (phba->sli4_hba.fp_eq[eq_idx]->queue_id == qid)
+	for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+		if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
 			break;
 	}
 
-	if (eq_idx < phba->cfg_fcp_eq_count) {
+	if (eq_idx < phba->cfg_fcp_io_channel) {
 		printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.fp_eq[eq_idx]);
+		lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
 		return;
 	}
 
-	if (phba->sli4_hba.sp_eq->queue_id == qid) {
-		printk(KERN_ERR "SP EQ[|Qid:%d]\n", qid);
-		lpfc_debug_dump_q(phba->sli4_hba.sp_eq);
-	}
 }
 
 void lpfc_debug_dump_all_queues(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1d84b63fcca..af49fb03dbb 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -145,6 +145,7 @@ struct lpfc_node_rrq {
 #define NLP_RCV_PLOGI      0x00080000	/* Rcv'ed PLOGI from remote system */
 #define NLP_LOGO_ACC       0x00100000	/* Process LOGO after ACC completes */
 #define NLP_TGT_NO_SCSIID  0x00200000	/* good PRLI but no binding for scsid */
+#define NLP_ISSUE_LOGO     0x00400000	/* waiting to issue a LOGO */
 #define NLP_ACC_REGLOGIN   0x01000000	/* Issue Reg Login after successful
 					   ACC */
 #define NLP_NPR_ADISC      0x02000000	/* Issue ADISC when dq'ed from
@@ -201,10 +202,11 @@ struct lpfc_node_rrq {
 #define NLP_STE_ADISC_ISSUE       0x2	/* ADISC was sent to NL_PORT */
 #define NLP_STE_REG_LOGIN_ISSUE   0x3	/* REG_LOGIN was issued for NL_PORT */
 #define NLP_STE_PRLI_ISSUE        0x4	/* PRLI was sent to NL_PORT */
-#define NLP_STE_UNMAPPED_NODE     0x5	/* PRLI completed from NL_PORT */
-#define NLP_STE_MAPPED_NODE       0x6	/* Identified as a FCP Target */
-#define NLP_STE_NPR_NODE          0x7	/* NPort disappeared */
-#define NLP_STE_MAX_STATE         0x8
+#define NLP_STE_LOGO_ISSUE        0x5	/* LOGO was sent to NL_PORT */
+#define NLP_STE_UNMAPPED_NODE     0x6	/* PRLI completed from NL_PORT */
+#define NLP_STE_MAPPED_NODE       0x7	/* Identified as a FCP Target */
+#define NLP_STE_NPR_NODE          0x8	/* NPort disappeared */
+#define NLP_STE_MAX_STATE         0x9
 #define NLP_STE_FREED_NODE        0xff	/* node entry was freed to MEM_NLP */
 
 /* For UNUSED_NODE state, the node has just been allocated.
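Editor's note: inserting NLP_STE_LOGO_ISSUE at 0x5 shifts every later state up by one and bumps NLP_STE_MAX_STATE from 0x8 to 0x9. The state-name table in lpfc_hbadisc.c (further down in this patch) stays correct because it uses designated initializers keyed by the state macros rather than positional entries. A minimal sketch of that idea, with made-up names and values:

/* Illustrative only, not driver code: a designated-initializer name
 * table survives this kind of renumbering because each entry is keyed
 * by the macro, not by its position in the array. */
#include <stdio.h>

#define STE_PRLI_ISSUE    0x4
#define STE_LOGO_ISSUE    0x5	/* newly inserted state */
#define STE_UNMAPPED_NODE 0x6	/* shifted up from 0x5 */
#define STE_MAX_STATE     0x9

static const char *state_name[STE_MAX_STATE] = {
	[STE_PRLI_ISSUE]    = "PRLI",
	[STE_LOGO_ISSUE]    = "LOGO",
	[STE_UNMAPPED_NODE] = "UNMAPPED",
};

int main(void)
{
	printf("%s\n", state_name[STE_LOGO_ISSUE]);	/* prints LOGO */
	return 0;
}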
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index d54ae199979..cfe533bc979 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -962,7 +962,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 			if ((phba->fcoe_cvl_eventtag_attn ==
 			     phba->fcoe_cvl_eventtag) &&
 			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-			    (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
+			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			    IOERR_SLI_ABORTED))
 				goto stop_rr_fcf_flogi;
 			else
 				phba->fcoe_cvl_eventtag_attn =
@@ -1108,8 +1109,10 @@ flogifail:
 		/* Start discovery */
 		lpfc_disc_start(vport);
 	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
-			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
-			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
+			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			 IOERR_SLI_ABORTED) &&
+			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+			 IOERR_SLI_DOWN))) &&
 			(phba->link_state != LPFC_CLEAR_LA)) {
 		/* If FLOGI failed enable link interrupt. */
 		lpfc_issue_clear_la(phba, vport);
@@ -1476,6 +1479,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 		return ndlp;
 	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap));
 
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
+		 ndlp, ndlp->nlp_DID, new_ndlp);
+
 	if (!new_ndlp) {
 		rc = memcmp(&ndlp->nlp_portname, name,
 			    sizeof(struct lpfc_name));
@@ -1527,6 +1534,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 		/* The new_ndlp is replacing ndlp totally, so we need
 		 * to put ndlp on UNUSED list and try to free it.
 		 */
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3179 PLOGI confirm NEW: %x %x\n",
+			 new_ndlp->nlp_DID, keepDID);
 
 		/* Fix up the rport accordingly */
 		rport = ndlp->rport;
@@ -1559,23 +1569,34 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 		lpfc_drop_node(vport, ndlp);
 	}
 	else {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "3180 PLOGI confirm SWAP: %x %x\n",
+			 new_ndlp->nlp_DID, keepDID);
+
 		lpfc_unreg_rpi(vport, ndlp);
+
 		/* Two ndlps cannot have the same did */
 		ndlp->nlp_DID = keepDID;
 		if (phba->sli_rev == LPFC_SLI_REV4)
 			memcpy(&ndlp->active_rrqs.xri_bitmap,
 				&rrq.xri_bitmap,
 				sizeof(ndlp->active_rrqs.xri_bitmap));
+
 		/* Since we are swapping the ndlp passed in with the new one
-		 * and the did has already been swapped, copy over the
-		 * state and names.
+		 * and the did has already been swapped, copy over state.
+		 * The new WWNs are already in new_ndlp since thats what
+		 * we looked it up by in the begining of this routine.
 		 */
-		memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
-			sizeof(struct lpfc_name));
-		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
-			sizeof(struct lpfc_name));
 		new_ndlp->nlp_state = ndlp->nlp_state;
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
+		/* Since we are switching over to the new_ndlp, the old
+		 * ndlp should be put in the NPR state, unless we have
+		 * already started re-discovery on it.
+		 */
+		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
+		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+
 		/* Fix up the rport accordingly */
 		rport = ndlp->rport;
 		if (rport) {
@@ -2367,6 +2388,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	IOCB_t *irsp;
 	struct lpfc_sli *psli;
 	struct lpfcMboxq *mbox;
+	unsigned long flags;
+	uint32_t skip_recovery = 0;
 
 	psli = &phba->sli;
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2381,47 +2404,52 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2381 "LOGO cmpl: status:x%x/x%x did:x%x", 2404 "LOGO cmpl: status:x%x/x%x did:x%x",
2382 irsp->ulpStatus, irsp->un.ulpWord[4], 2405 irsp->ulpStatus, irsp->un.ulpWord[4],
2383 ndlp->nlp_DID); 2406 ndlp->nlp_DID);
2407
2384 /* LOGO completes to NPort <nlp_DID> */ 2408 /* LOGO completes to NPort <nlp_DID> */
2385 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2409 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2386 "0105 LOGO completes to NPort x%x " 2410 "0105 LOGO completes to NPort x%x "
2387 "Data: x%x x%x x%x x%x\n", 2411 "Data: x%x x%x x%x x%x\n",
2388 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2412 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2389 irsp->ulpTimeout, vport->num_disc_nodes); 2413 irsp->ulpTimeout, vport->num_disc_nodes);
2390 /* Check to see if link went down during discovery */ 2414
2391 if (lpfc_els_chk_latt(vport)) 2415 if (lpfc_els_chk_latt(vport)) {
2416 skip_recovery = 1;
2392 goto out; 2417 goto out;
2418 }
2393 2419
2420 /* Check to see if link went down during discovery */
2394 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2421 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2395 /* NLP_EVT_DEVICE_RM should unregister the RPI 2422 /* NLP_EVT_DEVICE_RM should unregister the RPI
2396 * which should abort all outstanding IOs. 2423 * which should abort all outstanding IOs.
2397 */ 2424 */
2398 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2425 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2399 NLP_EVT_DEVICE_RM); 2426 NLP_EVT_DEVICE_RM);
2427 skip_recovery = 1;
2400 goto out; 2428 goto out;
2401 } 2429 }
2402 2430
2403 if (irsp->ulpStatus) { 2431 if (irsp->ulpStatus) {
2404 /* Check for retry */ 2432 /* Check for retry */
2405 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 2433 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2406 /* ELS command is being retried */ 2434 /* ELS command is being retried */
2435 skip_recovery = 1;
2407 goto out; 2436 goto out;
2437 }
2408 /* LOGO failed */ 2438 /* LOGO failed */
2409 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2439 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2410 "2756 LOGO failure DID:%06X Status:x%x/x%x\n", 2440 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2411 ndlp->nlp_DID, irsp->ulpStatus, 2441 ndlp->nlp_DID, irsp->ulpStatus,
2412 irsp->un.ulpWord[4]); 2442 irsp->un.ulpWord[4]);
2413 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2443 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2414 if (lpfc_error_lost_link(irsp)) 2444 if (lpfc_error_lost_link(irsp)) {
2445 skip_recovery = 1;
2415 goto out; 2446 goto out;
2416 else 2447 }
2417 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2448 }
2418 NLP_EVT_CMPL_LOGO); 2449
2419 } else 2450 /* Call state machine. This will unregister the rpi if needed. */
2420 /* Good status, call state machine. 2451 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2421 * This will unregister the rpi if needed. 2452
2422 */
2423 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2424 NLP_EVT_CMPL_LOGO);
2425out: 2453out:
2426 lpfc_els_free_iocb(phba, cmdiocb); 2454 lpfc_els_free_iocb(phba, cmdiocb);
2427 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2455 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
@@ -2436,9 +2464,30 @@ out:
 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 		    MBX_NOT_FINISHED) {
 			mempool_free(mbox, phba->mbox_mem_pool);
+			skip_recovery = 1;
 		}
 	}
 	}
+
+	/*
+	 * If the node is a target, the handling attempts to recover the port.
+	 * For any other port type, the rpi is unregistered as an implicit
+	 * LOGO.
+	 */
+	if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+		lpfc_cancel_retry_delay_tmo(vport, ndlp);
+		spin_lock_irqsave(shost->host_lock, flags);
+		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+		spin_unlock_irqrestore(shost->host_lock, flags);
+
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+				 "3187 LOGO completes to NPort x%x: Start "
+				 "Recovery Data: x%x x%x x%x x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4], irsp->ulpTimeout,
+				 vport->num_disc_nodes);
+		lpfc_disc_start(vport);
+	}
 	return;
 }
 
@@ -2501,10 +2550,27 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2501 "Issue LOGO: did:x%x", 2550 "Issue LOGO: did:x%x",
2502 ndlp->nlp_DID, 0, 0); 2551 ndlp->nlp_DID, 0, 0);
2503 2552
2553 /*
2554 * If we are issuing a LOGO, we may try to recover the remote NPort
2555 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2556 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2557 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
2558 * for that ELS cmd. To avoid this situation, lets get rid of the
2559 * RPI right now, before any ELS cmds are sent.
2560 */
2561 spin_lock_irq(shost->host_lock);
2562 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2563 spin_unlock_irq(shost->host_lock);
2564 if (lpfc_unreg_rpi(vport, ndlp)) {
2565 lpfc_els_free_iocb(phba, elsiocb);
2566 return 0;
2567 }
2568
2504 phba->fc_stat.elsXmitLOGO++; 2569 phba->fc_stat.elsXmitLOGO++;
2505 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2570 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2506 spin_lock_irq(shost->host_lock); 2571 spin_lock_irq(shost->host_lock);
2507 ndlp->nlp_flag |= NLP_LOGO_SND; 2572 ndlp->nlp_flag |= NLP_LOGO_SND;
2573 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2508 spin_unlock_irq(shost->host_lock); 2574 spin_unlock_irq(shost->host_lock);
2509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2575 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2510 2576
@@ -2920,7 +2986,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
 	case ELS_CMD_LOGO:
 		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
 			ndlp->nlp_prev_state = ndlp->nlp_state;
-			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 		}
 		break;
 	case ELS_CMD_FDISC:
@@ -3007,7 +3073,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		}
 		break;
 	case IOSTAT_LOCAL_REJECT:
-		switch ((irsp->un.ulpWord[4] & 0xff)) {
+		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
 		case IOERR_LOOP_OPEN_FAILURE:
 			if (cmd == ELS_CMD_FLOGI) {
 				if (PCI_DEVICE_ID_HORNET ==
@@ -3214,7 +3280,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 	if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
 	    ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
-	    ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
+	    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+	    IOERR_NO_RESOURCES))) {
 		/* Don't reset timer for no resources */
 
 		/* If discovery / RSCN timer is running, reset it */
@@ -3273,7 +3340,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		return 1;
 	case ELS_CMD_LOGO:
 		ndlp->nlp_prev_state = ndlp->nlp_state;
-		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
 		lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
 		return 1;
 	}
@@ -3533,13 +3600,17 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 	kfree(mp);
 	mempool_free(pmb, phba->mbox_mem_pool);
-	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
-		lpfc_nlp_put(ndlp);
-		/* This is the end of the default RPI cleanup logic for this
-		 * ndlp. If no other discovery threads are using this ndlp.
-		 * we should free all resources associated with it.
-		 */
-		lpfc_nlp_not_used(ndlp);
+	if (ndlp) {
+		if (NLP_CHK_NODE_ACT(ndlp)) {
+			lpfc_nlp_put(ndlp);
+			/* This is the end of the default RPI cleanup logic for
+			 * this ndlp. If no other discovery threads are using
+			 * this ndlp, free all resources associated with it.
+			 */
+			lpfc_nlp_not_used(ndlp);
+		} else {
+			lpfc_drop_node(ndlp->vport, ndlp);
+		}
 	}
 
 	return;
@@ -6803,7 +6874,8 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
-	    (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
+	    (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+	    IOERR_RCV_BUFFER_WAITING) {
 		phba->fc_stat.NoRcvBuf++;
 		/* Not enough posted buffers; Try posting more buffers */
 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
@@ -7985,3 +8057,47 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
+
+/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted node.
+ *
+ * The driver calls this routine in response to an SLI4 XRI ABORT CQE
+ * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
+ * the driver is required to send a LOGO to the remote node before it
+ * attempts to recover its login to the remote node.
+ */
+void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+			   struct lpfc_nodelist *ndlp)
+{
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+	unsigned long flags = 0;
+
+	shost = lpfc_shost_from_vport(vport);
+	phba = vport->phba;
+	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+		lpfc_printf_log(phba, KERN_INFO,
+				LOG_SLI, "3093 No rport recovery needed. "
+				"rport in state 0x%x\n", ndlp->nlp_state);
+		return;
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"3094 Start rport recovery on shost id 0x%x "
+			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+			"flags 0x%x\n",
+			shost->host_no, ndlp->nlp_DID,
+			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+			ndlp->nlp_flag);
+	/*
+	 * The rport is not responding.  Remove the FCP-2 flag to prevent
+	 * an ADISC in the follow-up recovery code.
+	 */
+	spin_lock_irqsave(shost->host_lock, flags);
+	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	lpfc_issue_els_logo(vport, ndlp, 0);
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+}
+
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9b4f92941dc..e9845d2ecf1 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -123,6 +123,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
123 "rport devlosscb: sid:x%x did:x%x flg:x%x", 123 "rport devlosscb: sid:x%x did:x%x flg:x%x",
124 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 124 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
125 125
126 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
127 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
128 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
129
126 /* Don't defer this if we are in the process of deleting the vport 130 /* Don't defer this if we are in the process of deleting the vport
127 * or unloading the driver. The unload will cleanup the node 131 * or unloading the driver. The unload will cleanup the node
128 * appropriately we just need to cleanup the ndlp rport info here. 132 * appropriately we just need to cleanup the ndlp rport info here.
@@ -142,6 +146,15 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
 		return;
 
+	if (ndlp->nlp_type & NLP_FABRIC) {
+
+		/* If the WWPN of the rport and ndlp don't match, ignore it */
+		if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
+			put_device(&rport->dev);
+			return;
+		}
+	}
+
 	evtp = &ndlp->dev_loss_evt;
 
 	if (!list_empty(&evtp->evt_listp))
@@ -202,6 +215,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
202 "rport devlosstmo:did:x%x type:x%x id:x%x", 215 "rport devlosstmo:did:x%x type:x%x id:x%x",
203 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 216 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
204 217
218 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
219 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
220 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
221
205 /* Don't defer this if we are in the process of deleting the vport 222 /* Don't defer this if we are in the process of deleting the vport
206 * or unloading the driver. The unload will cleanup the node 223 * or unloading the driver. The unload will cleanup the node
207 * appropriately we just need to cleanup the ndlp rport info here. 224 * appropriately we just need to cleanup the ndlp rport info here.
@@ -3492,7 +3509,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *pmb = NULL;
 	MAILBOX_t *mb;
 	struct static_vport_info *vport_info;
-	int rc = 0, i;
+	int mbx_wait_rc = 0, i;
 	struct fc_vport_identifiers vport_id;
 	struct fc_vport *new_fc_vport;
 	struct Scsi_Host *shost;
@@ -3509,7 +3526,7 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
3509 " allocate mailbox memory\n"); 3526 " allocate mailbox memory\n");
3510 return; 3527 return;
3511 } 3528 }
3512 3529 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3513 mb = &pmb->u.mb; 3530 mb = &pmb->u.mb;
3514 3531
3515 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 3532 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
@@ -3523,24 +3540,31 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
 	vport_buff = (uint8_t *) vport_info;
 	do {
+		/* free dma buffer from previous round */
+		if (pmb->context1) {
+			mp = (struct lpfc_dmabuf *)pmb->context1;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
 		if (lpfc_dump_static_vport(phba, pmb, offset))
 			goto out;
 
 		pmb->vport = phba->pport;
-		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
+							LPFC_MBOX_TMO);
 
-		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0544 lpfc_create_static_vport failed to"
 				" issue dump mailbox command ret 0x%x "
 				"status 0x%x\n",
-				rc, mb->mbxStatus);
+				mbx_wait_rc, mb->mbxStatus);
 			goto out;
 		}
 
 		if (phba->sli_rev == LPFC_SLI_REV4) {
 			byte_count = pmb->u.mqe.un.mb_words[5];
-			mp = (struct lpfc_dmabuf *) pmb->context2;
+			mp = (struct lpfc_dmabuf *)pmb->context1;
 			if (byte_count > sizeof(struct static_vport_info) -
 					offset)
 				byte_count = sizeof(struct static_vport_info)
@@ -3604,9 +3628,9 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
 
 out:
 	kfree(vport_info);
-	if (rc != MBX_TIMEOUT) {
-		if (pmb->context2) {
-			mp = (struct lpfc_dmabuf *) pmb->context2;
+	if (mbx_wait_rc != MBX_TIMEOUT) {
+		if (pmb->context1) {
+			mp = (struct lpfc_dmabuf *)pmb->context1;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 		}
@@ -3834,6 +3858,10 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
 		fc_remote_port_rolechg(rport, rport_ids.roles);
 
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+			 "3183 rport register x%06x, rport %p role x%x\n",
+			 ndlp->nlp_DID, rport, rport_ids.roles);
+
 	if ((rport->scsi_target_id != -1) &&
 	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
 		ndlp->nlp_sid = rport->scsi_target_id;
@@ -3850,6 +3878,10 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
3850 "rport delete: did:x%x flg:x%x type x%x", 3878 "rport delete: did:x%x flg:x%x type x%x",
3851 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3879 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3852 3880
3881 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3882 "3184 rport unregister x%06x, rport %p\n",
3883 ndlp->nlp_DID, rport);
3884
3853 fc_remote_port_delete(rport); 3885 fc_remote_port_delete(rport);
3854 3886
3855 return; 3887 return;
@@ -3964,6 +3996,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
 		[NLP_STE_ADISC_ISSUE] = "ADISC",
 		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
 		[NLP_STE_PRLI_ISSUE] = "PRLI",
+		[NLP_STE_LOGO_ISSUE] = "LOGO",
 		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
 		[NLP_STE_MAPPED_NODE] = "MAPPED",
 		[NLP_STE_NPR_NODE] = "NPR",
@@ -4330,6 +4363,26 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 	return 0;
 }
 
+/**
+ * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function will issue an ELS LOGO command after completing
+ * the UNREG_RPI.
+ **/
+void
+lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	struct lpfc_vport  *vport = pmb->vport;
+	struct lpfc_nodelist *ndlp;
+
+	ndlp = (struct lpfc_nodelist *)(pmb->context1);
+	if (!ndlp)
+		return;
+	lpfc_issue_els_logo(vport, ndlp, 0);
+}
+
 /*
  * Free rpi associated with LPFC_NODELIST entry.
  * This routine is called from lpfc_freenode(), when we are removing
@@ -4354,9 +4407,16 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		rpi = ndlp->nlp_rpi;
 		if (phba->sli_rev == LPFC_SLI_REV4)
 			rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+
 		lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
 		mbox->vport = vport;
-		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
+			mbox->context1 = ndlp;
+			mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
+		} else {
+			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		}
+
 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 		if (rc == MBX_NOT_FINISHED)
 			mempool_free(mbox, phba->mbox_mem_pool);
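Editor's note: the hunk above selects the UNREG_RPI mailbox completion handler at submit time; when NLP_ISSUE_LOGO is set, lpfc_nlp_logo_unreg runs on completion and issues the LOGO only after the RPI is gone, which avoids the IOERR_INVALID_RPI race described earlier in this patch. A hedged sketch of the callback-chaining idea, with simplified stand-in types and names (not the driver's):

/* Illustration only: choose the completion callback up front so the
 * follow-up step runs strictly after the first operation completes. */
#include <stdio.h>

struct node { int wants_logo; };
struct mbox {
	struct node *ctx;
	void (*cmpl)(struct mbox *);
};

static void default_cmpl(struct mbox *m)
{
	(void)m;				/* just release resources */
}

static void logo_unreg_cmpl(struct mbox *m)
{
	(void)m->ctx;
	printf("UNREG done, issuing LOGO\n");	/* the chained step */
}

static void unreg_rpi(struct mbox *m, struct node *n)
{
	m->ctx = n;
	m->cmpl = n->wants_logo ? logo_unreg_cmpl : default_cmpl;
	m->cmpl(m);	/* stand-in for the asynchronous completion */
}

int main(void)
{
	struct node n = { .wants_logo = 1 };
	struct mbox m;

	unreg_rpi(&m, &n);
	return 0;
}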
@@ -4499,9 +4559,13 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		lpfc_disable_node(vport, ndlp);
 	}
 
+
+	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
+
 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
 	if ((mb = phba->sli.mbox_active)) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mb->context2 = NULL;
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4512,6 +4576,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	/* Cleanup REG_LOGIN completions which are not yet processed */
 	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
 			(ndlp != (struct lpfc_nodelist *) mb->context2))
 			continue;
 
@@ -4521,6 +4586,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
+		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
 		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
 			mp = (struct lpfc_dmabuf *) (mb->context1);
 			if (mp) {
@@ -4585,7 +4651,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
 			mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
 			mbox->vport = vport;
-			mbox->context2 = NULL;
+			mbox->context2 = ndlp;
 			rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 			if (rc == MBX_NOT_FINISHED) {
 				mempool_free(mbox, phba->mbox_mem_pool);
@@ -5365,9 +5431,17 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
 	struct lpfc_nodelist *ndlp;
 
 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-		if (filter(ndlp, param))
+		if (filter(ndlp, param)) {
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+					 "3185 FIND node filter %p DID "
+					 "Data: x%p x%x x%x\n",
+					 filter, ndlp, ndlp->nlp_DID,
+					 ndlp->nlp_flag);
 			return ndlp;
+		}
 	}
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+			 "3186 FIND node filter %p NOT FOUND.\n", filter);
 	return NULL;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 41bb1d2fb62..7398ca862e9 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1188,8 +1188,8 @@ typedef struct {
  */
 
 /* Number of rings currently used and available. */
-#define MAX_CONFIGURED_RINGS  3
-#define MAX_RINGS             4
+#define MAX_SLI3_CONFIGURED_RINGS  3
+#define MAX_SLI3_RINGS             4
 
 /* IOCB / Mailbox is owned by FireFly */
 #define OWN_CHIP 1
@@ -1251,6 +1251,8 @@ typedef struct {
 #define PCI_VENDOR_ID_SERVERENGINE 0x19a2
 #define PCI_DEVICE_ID_TIGERSHARK   0x0704
 #define PCI_DEVICE_ID_TOMCAT       0x0714
+#define PCI_DEVICE_ID_SKYHAWK      0x0724
+#define PCI_DEVICE_ID_SKYHAWK_VF   0x072c
 
 #define JEDEC_ID_ADDRESS   0x0080001c
 #define FIREFLY_JEDEC_ID   0x1ACC
@@ -1458,6 +1460,7 @@ typedef struct { /* FireFly BIU registers */
 #define MBX_UNREG_FCFI      0xA2
 #define MBX_INIT_VFI        0xA3
 #define MBX_INIT_VPI        0xA4
+#define MBX_ACCESS_VDATA    0xA5
 
 #define MBX_AUTH_PORT       0xF8
 #define MBX_SECURITY_MGMT   0xF9
@@ -2991,7 +2994,7 @@ typedef struct _PCB {
 
 	uint32_t pgpAddrLow;
 	uint32_t pgpAddrHigh;
-	SLI2_RDSC rdsc[MAX_RINGS];
+	SLI2_RDSC rdsc[MAX_SLI3_RINGS];
 } PCB_t;
 
 /* NEW_FEATURE */
@@ -3101,18 +3104,18 @@ struct lpfc_pgp {
 
 struct sli2_desc {
 	uint32_t unused1[16];
-	struct lpfc_hgp host[MAX_RINGS];
-	struct lpfc_pgp port[MAX_RINGS];
+	struct lpfc_hgp host[MAX_SLI3_RINGS];
+	struct lpfc_pgp port[MAX_SLI3_RINGS];
 };
 
 struct sli3_desc {
-	struct lpfc_hgp host[MAX_RINGS];
+	struct lpfc_hgp host[MAX_SLI3_RINGS];
 	uint32_t reserved[8];
 	uint32_t hbq_put[16];
 };
 
 struct sli3_pgp {
-	struct lpfc_pgp port[MAX_RINGS];
+	struct lpfc_pgp port[MAX_SLI3_RINGS];
 	uint32_t hbq_get[16];
 };
 
@@ -3242,6 +3245,7 @@ typedef struct {
 #define IOERR_SLI_DOWN             0x101  /* ulpStatus - Driver defined */
 #define IOERR_SLI_BRESET           0x102
 #define IOERR_SLI_ABORTED          0x103
+#define IOERR_PARAM_MASK           0x1ff
 } PARM_ERR;
 
 typedef union {
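Editor's note: the new IOERR_PARAM_MASK underpins the lpfc_els.c changes above. ulpWord[4] carries the IOERR code in its low nine bits (the codes run 0x0 through 0x103), so an unmasked equality test against IOERR_SLI_ABORTED can miss when the firmware sets additional status bits in the word. A small standalone illustration; the sample word value is invented:

/* Sketch of why the mask matters: with extra bits set above bit 8,
 * an unmasked comparison misses the real error code. */
#include <stdio.h>

#define IOERR_PARAM_MASK  0x1ff
#define IOERR_SLI_ABORTED 0x103

int main(void)
{
	unsigned int ulp_word4 = 0xabc50103;	/* hypothetical status */

	printf("unmasked match: %d\n", ulp_word4 == IOERR_SLI_ABORTED);
	printf("masked match:   %d\n",
	       (ulp_word4 & IOERR_PARAM_MASK) == IOERR_SLI_ABORTED);
	return 0;
}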
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 953603a7a43..834b699cac7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -187,11 +187,17 @@ struct lpfc_sli_intf {
 /* Active interrupt test count */
 #define LPFC_ACT_INTR_CNT	4
 
+/* Algrithmns for scheduling FCP commands to WQs */
+#define	LPFC_FCP_SCHED_ROUND_ROBIN	0
+#define	LPFC_FCP_SCHED_BY_CPU		1
+
 /* Delay Multiplier constant */
 #define LPFC_DMULT_CONST       651042
-#define LPFC_MIM_IMAX          636
-#define LPFC_FP_DEF_IMAX       10000
-#define LPFC_SP_DEF_IMAX       10000
+
+/* Configuration of Interrupts / sec for entire HBA port */
+#define LPFC_MIN_IMAX          5000
+#define LPFC_MAX_IMAX          5000000
+#define LPFC_DEF_IMAX          50000
 
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES	8
@@ -338,7 +344,7 @@ struct lpfc_cqe {
  * Define mask value for xri_aborted and wcqe completed CQE extended status.
  * Currently, extended status is limited to 9 bits (0x0 -> 0x103) .
  */
-#define WCQE_PARAM_MASK		0x1FF;
+#define WCQE_PARAM_MASK		0x1FF
 
 /* completion queue entry for wqe completions */
 struct lpfc_wcqe_complete {
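Editor's note: the one-character fix above matters more than it looks. With the stray semicolon, the macro expands inside expressions, so a masked comparison like the one below would read "(status & 0x1FF;) == 0x103" and fail to compile. A minimal illustration with a made-up status value:

/* With the corrected macro (no trailing ';'), masked comparisons
 * expand and compile as intended. */
#include <stdio.h>

#define WCQE_PARAM_MASK		0x1FF	/* fixed: no trailing ';' */

int main(void)
{
	unsigned int status = 0x20103;	/* hypothetical extended status */

	printf("%d\n", (status & WCQE_PARAM_MASK) == 0x103);
	return 0;
}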
@@ -880,13 +886,19 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_EQ_DESTROY			0x37
 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG			0x3A
 #define LPFC_MBOX_OPCODE_FUNCTION_RESET			0x3D
+#define LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG	0x3E
+#define LPFC_MBOX_OPCODE_SET_BOOT_CONFIG		0x43
 #define LPFC_MBOX_OPCODE_GET_PORT_NAME			0x4D
 #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT			0x5A
+#define LPFC_MBOX_OPCODE_GET_VPD_DATA			0x5B
+#define LPFC_MBOX_OPCODE_SEND_ACTIVATION		0x73
+#define LPFC_MBOX_OPCODE_RESET_LICENSES			0x74
 #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO		0x9A
 #define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT		0x9B
 #define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT		0x9C
 #define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT		0x9D
 #define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG		0xA0
+#define LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES		0xA1
 #define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG		0xA4
 #define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG		0xA5
 #define LPFC_MBOX_OPCODE_GET_PROFILE_LIST		0xA6
@@ -1382,6 +1394,11 @@ struct lpfc_mbx_set_link_diag_state {
1382#define lpfc_mbx_set_diag_state_diag_SHIFT 0 1394#define lpfc_mbx_set_diag_state_diag_SHIFT 0
1383#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001 1395#define lpfc_mbx_set_diag_state_diag_MASK 0x00000001
1384#define lpfc_mbx_set_diag_state_diag_WORD word0 1396#define lpfc_mbx_set_diag_state_diag_WORD word0
1397#define lpfc_mbx_set_diag_state_diag_bit_valid_SHIFT 2
1398#define lpfc_mbx_set_diag_state_diag_bit_valid_MASK 0x00000001
1399#define lpfc_mbx_set_diag_state_diag_bit_valid_WORD word0
1400#define LPFC_DIAG_STATE_DIAG_BIT_VALID_NO_CHANGE 0
1401#define LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE 1
1385#define lpfc_mbx_set_diag_state_link_num_SHIFT 16 1402#define lpfc_mbx_set_diag_state_link_num_SHIFT 16
1386#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F 1403#define lpfc_mbx_set_diag_state_link_num_MASK 0x0000003F
1387#define lpfc_mbx_set_diag_state_link_num_WORD word0 1404#define lpfc_mbx_set_diag_state_link_num_WORD word0
@@ -2556,7 +2573,7 @@ struct lpfc_mbx_get_sli4_parameters {
2556}; 2573};
2557 2574
2558struct lpfc_rscr_desc_generic { 2575struct lpfc_rscr_desc_generic {
2559#define LPFC_RSRC_DESC_WSIZE 18 2576#define LPFC_RSRC_DESC_WSIZE 22
2560 uint32_t desc[LPFC_RSRC_DESC_WSIZE]; 2577 uint32_t desc[LPFC_RSRC_DESC_WSIZE];
2561}; 2578};
2562 2579
@@ -2566,6 +2583,9 @@ struct lpfc_rsrc_desc_pcie {
2566#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff 2583#define lpfc_rsrc_desc_pcie_type_MASK 0x000000ff
2567#define lpfc_rsrc_desc_pcie_type_WORD word0 2584#define lpfc_rsrc_desc_pcie_type_WORD word0
2568#define LPFC_RSRC_DESC_TYPE_PCIE 0x40 2585#define LPFC_RSRC_DESC_TYPE_PCIE 0x40
2586#define lpfc_rsrc_desc_pcie_length_SHIFT 8
2587#define lpfc_rsrc_desc_pcie_length_MASK 0x000000ff
2588#define lpfc_rsrc_desc_pcie_length_WORD word0
2569 uint32_t word1; 2589 uint32_t word1;
2570#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0 2590#define lpfc_rsrc_desc_pcie_pfnum_SHIFT 0
2571#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff 2591#define lpfc_rsrc_desc_pcie_pfnum_MASK 0x000000ff
@@ -2593,6 +2613,12 @@ struct lpfc_rsrc_desc_fcfcoe {
2593#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff 2613#define lpfc_rsrc_desc_fcfcoe_type_MASK 0x000000ff
2594#define lpfc_rsrc_desc_fcfcoe_type_WORD word0 2614#define lpfc_rsrc_desc_fcfcoe_type_WORD word0
2595#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43 2615#define LPFC_RSRC_DESC_TYPE_FCFCOE 0x43
2616#define lpfc_rsrc_desc_fcfcoe_length_SHIFT 8
2617#define lpfc_rsrc_desc_fcfcoe_length_MASK 0x000000ff
2618#define lpfc_rsrc_desc_fcfcoe_length_WORD word0
2619#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD 0
2620#define LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH 72
2621#define LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH 88
2596 uint32_t word1; 2622 uint32_t word1;
2597#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0 2623#define lpfc_rsrc_desc_fcfcoe_vfnum_SHIFT 0
2598#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff 2624#define lpfc_rsrc_desc_fcfcoe_vfnum_MASK 0x000000ff
@@ -2651,6 +2677,12 @@ struct lpfc_rsrc_desc_fcfcoe {
2651#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16 2677#define lpfc_rsrc_desc_fcfcoe_eq_cnt_SHIFT 16
2652#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff 2678#define lpfc_rsrc_desc_fcfcoe_eq_cnt_MASK 0x0000ffff
2653#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13 2679#define lpfc_rsrc_desc_fcfcoe_eq_cnt_WORD word13
2680/* extended FC/FCoE Resource Descriptor when length = 88 bytes */
2681 uint32_t bw_min;
2682 uint32_t bw_max;
2683 uint32_t iops_min;
2684 uint32_t iops_max;
2685 uint32_t reserved[4];
2654}; 2686};
2655 2687
2656struct lpfc_func_cfg { 2688struct lpfc_func_cfg {
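
The _SHIFT/_MASK/_WORD triplets added throughout this header follow the driver's bit-field accessor convention, in which a bf_get()-style macro pastes the field name onto each suffix. A simplified model of that convention (the demo macro is renamed to avoid clashing with the real one):

	#define bf_get_demo(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* e.g. bf_get_demo(lpfc_rsrc_desc_pcie_length, desc) expands to
	 *	(((desc)->word0 >> 8) & 0x000000ff)			*/
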
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 628a703abdd..8a55a586dd6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -480,11 +480,11 @@ lpfc_config_port_post(struct lpfc_hba *phba)
480 phba->link_state = LPFC_LINK_DOWN; 480 phba->link_state = LPFC_LINK_DOWN;
481 481
482 /* Only process IOCBs on ELS ring till hba_state is READY */ 482 /* Only process IOCBs on ELS ring till hba_state is READY */
483 if (psli->ring[psli->extra_ring].cmdringaddr) 483 if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
484 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT; 484 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
485 if (psli->ring[psli->fcp_ring].cmdringaddr) 485 if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
486 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT; 486 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
487 if (psli->ring[psli->next_ring].cmdringaddr) 487 if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
488 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 488 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
489 489
490 /* Post receive buffers for desired rings */ 490 /* Post receive buffers for desired rings */
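
The ring's command-ring address is now reached through a sli.sli3 sub-structure; the queue-setup hunk further down stores SLI4 work-queue pointers under sli.sli4. The per-ring layout this implies, sketched on the assumption of a union keyed by SLI revision (field types are placeholders):

	struct ring_state_sketch {
		union {
			struct { void *cmdringaddr; /* ... */ } sli3;
			struct { void *wqp;	    /* ... */ } sli4;
		} sli;
	};
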
@@ -2059,6 +2059,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2059 oneConnect = 1; 2059 oneConnect = 1;
2060 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; 2060 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2061 break; 2061 break;
2062 case PCI_DEVICE_ID_SKYHAWK:
2063 case PCI_DEVICE_ID_SKYHAWK_VF:
2064 oneConnect = 1;
2065 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2066 break;
2062 default: 2067 default:
2063 m = (typeof(m)){"Unknown", "", ""}; 2068 m = (typeof(m)){"Unknown", "", ""};
2064 break; 2069 break;
@@ -4546,6 +4551,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4546 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; 4551 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4547 } 4552 }
4548 4553
4554 if (!phba->sli.ring)
4555 phba->sli.ring = (struct lpfc_sli_ring *)
4556 kzalloc(LPFC_SLI3_MAX_RING *
4557 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4558 if (!phba->sli.ring)
4559 return -ENOMEM;
4560
4549 /* 4561 /*
4550 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4562 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4551 * used to create the sg_dma_buf_pool must be dynamically calculated. 4563 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4690,6 +4702,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4690 /* Get all the module params for configuring this host */ 4702 /* Get all the module params for configuring this host */
4691 lpfc_get_cfgparam(phba); 4703 lpfc_get_cfgparam(phba);
4692 phba->max_vpi = LPFC_MAX_VPI; 4704 phba->max_vpi = LPFC_MAX_VPI;
4705
4706 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
4707 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4708
4693 /* This will be set to correct value after the read_config mbox */ 4709 /* This will be set to correct value after the read_config mbox */
4694 phba->max_vports = 0; 4710 phba->max_vports = 0;
4695 4711
@@ -4705,6 +4721,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4705 sges_per_segment = 2; 4721 sges_per_segment = 2;
4706 4722
4707 /* 4723 /*
4724 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
4725 * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
4726 */
4727 if (!phba->sli.ring)
4728 phba->sli.ring = kzalloc(
4729 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4730 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4731 if (!phba->sli.ring)
4732 return -ENOMEM;
4733 /*
4708 * Since the sg_tablesize is module parameter, the sg_dma_buf_size 4734 * Since the sg_tablesize is module parameter, the sg_dma_buf_size
4709 * used to create the sg_dma_buf_pool must be dynamically calculated. 4735 * used to create the sg_dma_buf_pool must be dynamically calculated.
4710 * 2 segments are added since the IOCB needs a command and response bde. 4736 * 2 segments are added since the IOCB needs a command and response bde.
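
With one pseudo-ring per FCP io channel appended after the fixed SLI3 rings, channel i's ring sits at a constant offset; the queue-setup code below indexes it as ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx]. A small accessor capturing that layout, assuming the two ring-count macros agree as this allocation implies:

	static struct lpfc_sli_ring *fcp_channel_ring(struct lpfc_sli *psli, int chan)
	{
		/* FCP channel rings follow the fixed SLI3 rings in the array */
		return &psli->ring[LPFC_SLI3_MAX_RING + chan];
	}
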
@@ -4909,21 +4935,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4909 goto out_remove_rpi_hdrs; 4935 goto out_remove_rpi_hdrs;
4910 } 4936 }
4911 4937
4912 /* 4938 phba->sli4_hba.fcp_eq_hdl =
4913 * The cfg_fcp_eq_count can be zero whenever there is exactly one 4939 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4914 * interrupt vector. This is not an error 4940 phba->cfg_fcp_io_channel), GFP_KERNEL);
4915 */ 4941 if (!phba->sli4_hba.fcp_eq_hdl) {
4916 if (phba->cfg_fcp_eq_count) { 4942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4917 phba->sli4_hba.fcp_eq_hdl = 4943 "2572 Failed allocate memory for "
4918 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4944 "fast-path per-EQ handle array\n");
4919 phba->cfg_fcp_eq_count), GFP_KERNEL); 4945 rc = -ENOMEM;
4920 if (!phba->sli4_hba.fcp_eq_hdl) { 4946 goto out_free_fcf_rr_bmask;
4921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4922 "2572 Failed allocate memory for "
4923 "fast-path per-EQ handle array\n");
4924 rc = -ENOMEM;
4925 goto out_free_fcf_rr_bmask;
4926 }
4927 } 4947 }
4928 4948
4929 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4949 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -5550,6 +5570,10 @@ lpfc_hba_free(struct lpfc_hba *phba)
5550 /* Release the driver assigned board number */ 5570 /* Release the driver assigned board number */
5551 idr_remove(&lpfc_hba_index, phba->brd_no); 5571 idr_remove(&lpfc_hba_index, phba->brd_no);
5552 5572
5573 /* Free memory allocated with sli rings */
5574 kfree(phba->sli.ring);
5575 phba->sli.ring = NULL;
5576
5553 kfree(phba); 5577 kfree(phba);
5554 return; 5578 return;
5555} 5579}
@@ -6275,8 +6299,9 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6275 uint32_t shdr_status, shdr_add_status; 6299 uint32_t shdr_status, shdr_add_status;
6276 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6300 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6277 struct lpfc_rsrc_desc_fcfcoe *desc; 6301 struct lpfc_rsrc_desc_fcfcoe *desc;
6302 char *pdesc_0;
6278 uint32_t desc_count; 6303 uint32_t desc_count;
6279 int length, i, rc = 0; 6304 int length, i, rc = 0, rc2;
6280 6305
6281 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6306 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6282 if (!pmb) { 6307 if (!pmb) {
@@ -6388,18 +6413,17 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6388 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG, 6413 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6389 length, LPFC_SLI4_MBX_EMBED); 6414 length, LPFC_SLI4_MBX_EMBED);
6390 6415
6391 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 6416 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6392 shdr = (union lpfc_sli4_cfg_shdr *) 6417 shdr = (union lpfc_sli4_cfg_shdr *)
6393 &pmb->u.mqe.un.sli4_config.header.cfg_shdr; 6418 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6394 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6419 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6395 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6420 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6396 if (rc || shdr_status || shdr_add_status) { 6421 if (rc2 || shdr_status || shdr_add_status) {
6397 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6422 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6398 "3026 Mailbox failed , mbxCmd x%x " 6423 "3026 Mailbox failed , mbxCmd x%x "
6399 "GET_FUNCTION_CONFIG, mbxStatus x%x\n", 6424 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6400 bf_get(lpfc_mqe_command, &pmb->u.mqe), 6425 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6401 bf_get(lpfc_mqe_status, &pmb->u.mqe)); 6426 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6402 rc = -EIO;
6403 goto read_cfg_out; 6427 goto read_cfg_out;
6404 } 6428 }
6405 6429
@@ -6407,11 +6431,18 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6407 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6431 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6408 desc_count = get_func_cfg->func_cfg.rsrc_desc_count; 6432 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6409 6433
6434 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6435 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6436 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6437 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6438 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6439 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6440 goto read_cfg_out;
6441
6410 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) { 6442 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6411 desc = (struct lpfc_rsrc_desc_fcfcoe *) 6443 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6412 &get_func_cfg->func_cfg.desc[i];
6413 if (LPFC_RSRC_DESC_TYPE_FCFCOE == 6444 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6414 bf_get(lpfc_rsrc_desc_pcie_type, desc)) { 6445 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6415 phba->sli4_hba.iov.pf_number = 6446 phba->sli4_hba.iov.pf_number =
6416 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc); 6447 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6417 phba->sli4_hba.iov.vf_number = 6448 phba->sli4_hba.iov.vf_number =
@@ -6425,13 +6456,11 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6425 "3027 GET_FUNCTION_CONFIG: pf_number:%d, " 6456 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6426 "vf_number:%d\n", phba->sli4_hba.iov.pf_number, 6457 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6427 phba->sli4_hba.iov.vf_number); 6458 phba->sli4_hba.iov.vf_number);
6428 else { 6459 else
6429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6460 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6430 "3028 GET_FUNCTION_CONFIG: failed to find " 6461 "3028 GET_FUNCTION_CONFIG: failed to find "
6431 "Resource Descriptor:x%x\n", 6462 "Resource Descriptor:x%x\n",
6432 LPFC_RSRC_DESC_TYPE_FCFCOE); 6463 LPFC_RSRC_DESC_TYPE_FCFCOE);
6433 rc = -EIO;
6434 }
6435 6464
6436read_cfg_out: 6465read_cfg_out:
6437 mempool_free(pmb, phba->mbox_mem_pool); 6466 mempool_free(pmb, phba->mbox_mem_pool);
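
The rewritten loop reads the descriptor length once from the first entry, substitutes the 72-byte V0 size when the field holds the reserved zero, bails out on any other unexpected length, and then strides through the buffer by that length rather than indexing a fixed-size array. The walk in isolation (stub function; the decode body is elided):

	static void walk_fcfcoe_descs(char *pdesc_0, int max_descs)
	{
		struct lpfc_rsrc_desc_fcfcoe *d;
		unsigned int len, i;

		d = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
		len = (d->word0 >> 8) & 0xff;	/* lpfc_rsrc_desc_fcfcoe_length */
		if (len == 0)			/* V0 descriptors report 0 ...  */
			len = 72;		/* ... but occupy 72 bytes      */
		for (i = 0; i < max_descs; i++) {
			d = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + len * i);
			/* check the type bits in d->word0, read pfnum/vfnum, ... */
		}
	}
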
@@ -6512,53 +6541,40 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
6512static int 6541static int
6513lpfc_sli4_queue_verify(struct lpfc_hba *phba) 6542lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6514{ 6543{
6515 int cfg_fcp_wq_count; 6544 int cfg_fcp_io_channel;
6516 int cfg_fcp_eq_count; 6545 uint32_t cpu;
6546 uint32_t i = 0;
6547
6517 6548
6518 /* 6549 /*
6519 * Sanity check for confiugred queue parameters against the run-time 6550 * Sanity check for configured queue parameters against the run-time
6520 * device parameters 6551 * device parameters
6521 */ 6552 */
6522 6553
6523 /* Sanity check on FCP fast-path WQ parameters */ 6554 /* Sanity check on HBA EQ parameters */
6524 cfg_fcp_wq_count = phba->cfg_fcp_wq_count; 6555 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6525 if (cfg_fcp_wq_count > 6556
6526 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { 6557 /* It doesn't make sense to have more io channels than CPUs */
6527 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - 6558 for_each_online_cpu(cpu) {
6528 LPFC_SP_WQN_DEF; 6559 i++;
6529 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { 6560 }
6530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6561 if (i < cfg_fcp_io_channel) {
6531 "2581 Not enough WQs (%d) from "
6532 "the pci function for supporting "
6533 "FCP WQs (%d)\n",
6534 phba->sli4_hba.max_cfg_param.max_wq,
6535 phba->cfg_fcp_wq_count);
6536 goto out_error;
6537 }
6538 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6562 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6539 "2582 Not enough WQs (%d) from the pci " 6563 "3188 Reducing IO channels to match number of "
6540 "function for supporting the requested " 6564 "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
6541 "FCP WQs (%d), the actual FCP WQs can " 6565 cfg_fcp_io_channel = i;
6542 "be supported: %d\n", 6566 }
6543 phba->sli4_hba.max_cfg_param.max_wq, 6567
6544 phba->cfg_fcp_wq_count, cfg_fcp_wq_count); 6568 if (cfg_fcp_io_channel >
6545 } 6569 phba->sli4_hba.max_cfg_param.max_eq) {
6546 /* The actual number of FCP work queues adopted */ 6570 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6547 phba->cfg_fcp_wq_count = cfg_fcp_wq_count; 6571 if (cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN) {
6548
6549 /* Sanity check on FCP fast-path EQ parameters */
6550 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
6551 if (cfg_fcp_eq_count >
6552 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
6553 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
6554 LPFC_SP_EQN_DEF;
6555 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
6556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6572 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6557 "2574 Not enough EQs (%d) from the " 6573 "2574 Not enough EQs (%d) from the "
6558 "pci function for supporting FCP " 6574 "pci function for supporting FCP "
6559 "EQs (%d)\n", 6575 "EQs (%d)\n",
6560 phba->sli4_hba.max_cfg_param.max_eq, 6576 phba->sli4_hba.max_cfg_param.max_eq,
6561 phba->cfg_fcp_eq_count); 6577 phba->cfg_fcp_io_channel);
6562 goto out_error; 6578 goto out_error;
6563 } 6579 }
6564 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6580 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -6567,22 +6583,16 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6567 "FCP EQs (%d), the actual FCP EQs can " 6583 "FCP EQs (%d), the actual FCP EQs can "
6568 "be supported: %d\n", 6584 "be supported: %d\n",
6569 phba->sli4_hba.max_cfg_param.max_eq, 6585 phba->sli4_hba.max_cfg_param.max_eq,
6570 phba->cfg_fcp_eq_count, cfg_fcp_eq_count); 6586 phba->cfg_fcp_io_channel, cfg_fcp_io_channel);
6571 }
6572 /* It does not make sense to have more EQs than WQs */
6573 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
6574 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6575 "2593 The FCP EQ count(%d) cannot be greater "
6576 "than the FCP WQ count(%d), limiting the "
6577 "FCP EQ count to %d\n", cfg_fcp_eq_count,
6578 phba->cfg_fcp_wq_count,
6579 phba->cfg_fcp_wq_count);
6580 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
6581 } 6587 }
6588
6589 /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be deprecated */
6590
6582 /* The actual number of FCP event queues adopted */ 6591 /* The actual number of FCP event queues adopted */
6583 phba->cfg_fcp_eq_count = cfg_fcp_eq_count; 6592 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6584 /* The overall number of event queues used */ 6593 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6585 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6594 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6595 phba->sli4_hba.cfg_eqn = cfg_fcp_io_channel;
6586 6596
6587 /* Get EQ depth from module parameter, fake the default for now */ 6597 /* Get EQ depth from module parameter, fake the default for now */
6588 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6598 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
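
lpfc_sli4_queue_verify now clamps the channel count twice: to the number of online CPUs and to the EQs the PCI function exposes. An equivalent sketch using the stock kernel helpers (num_online_cpus() would serve in place of the manual for_each_online_cpu() count):

	static int clamp_io_channels(int requested, int max_eq)
	{
		int chans = min_t(int, requested, num_online_cpus());

		return min(chans, max_eq);	/* never more than HW EQs */
	}
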
@@ -6615,50 +6625,104 @@ int
6615lpfc_sli4_queue_create(struct lpfc_hba *phba) 6625lpfc_sli4_queue_create(struct lpfc_hba *phba)
6616{ 6626{
6617 struct lpfc_queue *qdesc; 6627 struct lpfc_queue *qdesc;
6618 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6628 int idx;
6619 6629
6620 /* 6630 /*
6621 * Create Event Queues (EQs) 6631 * Create HBA Record arrays.
6622 */ 6632 */
6633 if (!phba->cfg_fcp_io_channel)
6634 return -ERANGE;
6623 6635
6624 /* Create slow path event queue */ 6636 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6625 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6637 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6626 phba->sli4_hba.eq_ecount); 6638 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6627 if (!qdesc) { 6639 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6640 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6641 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6642
6643 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
6644 phba->cfg_fcp_io_channel), GFP_KERNEL);
6645 if (!phba->sli4_hba.hba_eq) {
6646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6647 "2576 Failed allocate memory for "
6648 "fast-path EQ record array\n");
6649 goto out_error;
6650 }
6651
6652 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6653 phba->cfg_fcp_io_channel), GFP_KERNEL);
6654 if (!phba->sli4_hba.fcp_cq) {
6628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6655 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6629 "0496 Failed allocate slow-path EQ\n"); 6656 "2577 Failed allocate memory for fast-path "
6657 "CQ record array\n");
6658 goto out_error;
6659 }
6660
6661 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6662 phba->cfg_fcp_io_channel), GFP_KERNEL);
6663 if (!phba->sli4_hba.fcp_wq) {
6664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6665 "2578 Failed allocate memory for fast-path "
6666 "WQ record array\n");
6630 goto out_error; 6667 goto out_error;
6631 } 6668 }
6632 phba->sli4_hba.sp_eq = qdesc;
6633 6669
6634 /* 6670 /*
6635 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be 6671 * Since the first EQ can have multiple CQs associated with it,
6636 * zero whenever there is exactly one interrupt vector. This is not 6672 * this array is used to quickly see if we have a FCP fast-path
6637 * an error. 6673 * CQ match.
6638 */ 6674 */
6639 if (phba->cfg_fcp_eq_count) { 6675 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
6640 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6676 phba->cfg_fcp_io_channel), GFP_KERNEL);
6641 phba->cfg_fcp_eq_count), GFP_KERNEL); 6677 if (!phba->sli4_hba.fcp_cq_map) {
6642 if (!phba->sli4_hba.fp_eq) { 6678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6679 "2545 Failed allocate memory for fast-path "
6644 "2576 Failed allocate memory for " 6680 "CQ map\n");
6645 "fast-path EQ record array\n"); 6681 goto out_error;
6646 goto out_free_sp_eq;
6647 }
6648 } 6682 }
6649 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6683
6684 /*
6685 * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
6686 * how many EQs to create.
6687 */
6688 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6689
6690 /* Create EQs */
6650 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6691 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6651 phba->sli4_hba.eq_ecount); 6692 phba->sli4_hba.eq_ecount);
6652 if (!qdesc) { 6693 if (!qdesc) {
6653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6654 "0497 Failed allocate fast-path EQ\n"); 6695 "0497 Failed allocate EQ (%d)\n", idx);
6655 goto out_free_fp_eq; 6696 goto out_error;
6656 } 6697 }
6657 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; 6698 phba->sli4_hba.hba_eq[idx] = qdesc;
6699
6700 /* Create Fast Path FCP CQs */
6701 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6702 phba->sli4_hba.cq_ecount);
6703 if (!qdesc) {
6704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6705 "0499 Failed allocate fast-path FCP "
6706 "CQ (%d)\n", idx);
6707 goto out_error;
6708 }
6709 phba->sli4_hba.fcp_cq[idx] = qdesc;
6710
6711 /* Create Fast Path FCP WQs */
6712 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6713 phba->sli4_hba.wq_ecount);
6714 if (!qdesc) {
6715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6716 "0503 Failed allocate fast-path FCP "
6717 "WQ (%d)\n", idx);
6718 goto out_error;
6719 }
6720 phba->sli4_hba.fcp_wq[idx] = qdesc;
6658 } 6721 }
6659 6722
6723
6660 /* 6724 /*
6661 * Create Complete Queues (CQs) 6725 * Create Slow Path Completion Queues (CQs)
6662 */ 6726 */
6663 6727
6664 /* Create slow-path Mailbox Command Complete Queue */ 6728 /* Create slow-path Mailbox Command Complete Queue */
@@ -6667,7 +6731,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6667 if (!qdesc) { 6731 if (!qdesc) {
6668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6669 "0500 Failed allocate slow-path mailbox CQ\n"); 6733 "0500 Failed allocate slow-path mailbox CQ\n");
6670 goto out_free_fp_eq; 6734 goto out_error;
6671 } 6735 }
6672 phba->sli4_hba.mbx_cq = qdesc; 6736 phba->sli4_hba.mbx_cq = qdesc;
6673 6737
@@ -6677,59 +6741,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6677 if (!qdesc) { 6741 if (!qdesc) {
6678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6679 "0501 Failed allocate slow-path ELS CQ\n"); 6743 "0501 Failed allocate slow-path ELS CQ\n");
6680 goto out_free_mbx_cq; 6744 goto out_error;
6681 } 6745 }
6682 phba->sli4_hba.els_cq = qdesc; 6746 phba->sli4_hba.els_cq = qdesc;
6683 6747
6684 6748
6685 /* 6749 /*
6686 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. 6750 * Create Slow Path Work Queues (WQs)
6687 * If there are no FCP EQs then create exactly one FCP CQ.
6688 */ 6751 */
6689 if (phba->cfg_fcp_eq_count)
6690 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6691 phba->cfg_fcp_eq_count),
6692 GFP_KERNEL);
6693 else
6694 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6695 GFP_KERNEL);
6696 if (!phba->sli4_hba.fcp_cq) {
6697 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6698 "2577 Failed allocate memory for fast-path "
6699 "CQ record array\n");
6700 goto out_free_els_cq;
6701 }
6702 fcp_cqidx = 0;
6703 do {
6704 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6705 phba->sli4_hba.cq_ecount);
6706 if (!qdesc) {
6707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6708 "0499 Failed allocate fast-path FCP "
6709 "CQ (%d)\n", fcp_cqidx);
6710 goto out_free_fcp_cq;
6711 }
6712 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6713 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6714 6752
6715 /* Create Mailbox Command Queue */ 6753 /* Create Mailbox Command Queue */
6716 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6717 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6718 6754
6719 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, 6755 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6720 phba->sli4_hba.mq_ecount); 6756 phba->sli4_hba.mq_ecount);
6721 if (!qdesc) { 6757 if (!qdesc) {
6722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6723 "0505 Failed allocate slow-path MQ\n"); 6759 "0505 Failed allocate slow-path MQ\n");
6724 goto out_free_fcp_cq; 6760 goto out_error;
6725 } 6761 }
6726 phba->sli4_hba.mbx_wq = qdesc; 6762 phba->sli4_hba.mbx_wq = qdesc;
6727 6763
6728 /* 6764 /*
6729 * Create all the Work Queues (WQs) 6765 * Create ELS Work Queues
6730 */ 6766 */
6731 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6732 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6733 6767
6734 /* Create slow-path ELS Work Queue */ 6768 /* Create slow-path ELS Work Queue */
6735 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, 6769 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
@@ -6737,36 +6771,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6737 if (!qdesc) { 6771 if (!qdesc) {
6738 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6739 "0504 Failed allocate slow-path ELS WQ\n"); 6773 "0504 Failed allocate slow-path ELS WQ\n");
6740 goto out_free_mbx_wq; 6774 goto out_error;
6741 } 6775 }
6742 phba->sli4_hba.els_wq = qdesc; 6776 phba->sli4_hba.els_wq = qdesc;
6743 6777
6744 /* Create fast-path FCP Work Queue(s) */
6745 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6746 phba->cfg_fcp_wq_count), GFP_KERNEL);
6747 if (!phba->sli4_hba.fcp_wq) {
6748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6749 "2578 Failed allocate memory for fast-path "
6750 "WQ record array\n");
6751 goto out_free_els_wq;
6752 }
6753 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6754 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6755 phba->sli4_hba.wq_ecount);
6756 if (!qdesc) {
6757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6758 "0503 Failed allocate fast-path FCP "
6759 "WQ (%d)\n", fcp_wqidx);
6760 goto out_free_fcp_wq;
6761 }
6762 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6763 }
6764
6765 /* 6778 /*
6766 * Create Receive Queue (RQ) 6779 * Create Receive Queue (RQ)
6767 */ 6780 */
6768 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6769 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6770 6781
6771 /* Create Receive Queue for header */ 6782 /* Create Receive Queue for header */
6772 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, 6783 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
@@ -6774,7 +6785,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6774 if (!qdesc) { 6785 if (!qdesc) {
6775 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6776 "0506 Failed allocate receive HRQ\n"); 6787 "0506 Failed allocate receive HRQ\n");
6777 goto out_free_fcp_wq; 6788 goto out_error;
6778 } 6789 }
6779 phba->sli4_hba.hdr_rq = qdesc; 6790 phba->sli4_hba.hdr_rq = qdesc;
6780 6791
@@ -6784,52 +6795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6784 if (!qdesc) { 6795 if (!qdesc) {
6785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6796 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6786 "0507 Failed allocate receive DRQ\n"); 6797 "0507 Failed allocate receive DRQ\n");
6787 goto out_free_hdr_rq; 6798 goto out_error;
6788 } 6799 }
6789 phba->sli4_hba.dat_rq = qdesc; 6800 phba->sli4_hba.dat_rq = qdesc;
6790 6801
6791 return 0; 6802 return 0;
6792 6803
6793out_free_hdr_rq:
6794 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6795 phba->sli4_hba.hdr_rq = NULL;
6796out_free_fcp_wq:
6797 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6798 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6799 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6800 }
6801 kfree(phba->sli4_hba.fcp_wq);
6802 phba->sli4_hba.fcp_wq = NULL;
6803out_free_els_wq:
6804 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6805 phba->sli4_hba.els_wq = NULL;
6806out_free_mbx_wq:
6807 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6808 phba->sli4_hba.mbx_wq = NULL;
6809out_free_fcp_cq:
6810 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6811 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6812 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6813 }
6814 kfree(phba->sli4_hba.fcp_cq);
6815 phba->sli4_hba.fcp_cq = NULL;
6816out_free_els_cq:
6817 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6818 phba->sli4_hba.els_cq = NULL;
6819out_free_mbx_cq:
6820 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6821 phba->sli4_hba.mbx_cq = NULL;
6822out_free_fp_eq:
6823 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6824 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6825 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6826 }
6827 kfree(phba->sli4_hba.fp_eq);
6828 phba->sli4_hba.fp_eq = NULL;
6829out_free_sp_eq:
6830 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6831 phba->sli4_hba.sp_eq = NULL;
6832out_error: 6804out_error:
6805 lpfc_sli4_queue_destroy(phba);
6833 return -ENOMEM; 6806 return -ENOMEM;
6834} 6807}
6835 6808
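
Every failure path in the rewritten queue_create now converges on a single out_error label that simply calls lpfc_sli4_queue_destroy(), replacing the ladder of partial-unwind labels deleted above. The shape of that pattern in a self-contained miniature (userspace stand-ins, not driver code):

	#include <stdlib.h>
	#include <errno.h>

	struct ctx { void *a; void *b; };

	static void destroy_all(struct ctx *c)
	{
		free(c->a); c->a = NULL;	/* free(NULL) is a no-op, so    */
		free(c->b); c->b = NULL;	/* partial setups unwind safely */
	}

	static int create_all(struct ctx *c)
	{
		c->a = malloc(64);
		if (!c->a)
			goto out_error;
		c->b = malloc(64);
		if (!c->b)
			goto out_error;
		return 0;
	out_error:
		destroy_all(c);
		return -ENOMEM;
	}
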
@@ -6848,58 +6821,86 @@ out_error:
6848void 6821void
6849lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6822lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6850{ 6823{
6851 int fcp_qidx; 6824 int idx;
6825
6826 if (phba->sli4_hba.hba_eq != NULL) {
6827 /* Release HBA event queue */
6828 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6829 if (phba->sli4_hba.hba_eq[idx] != NULL) {
6830 lpfc_sli4_queue_free(
6831 phba->sli4_hba.hba_eq[idx]);
6832 phba->sli4_hba.hba_eq[idx] = NULL;
6833 }
6834 }
6835 kfree(phba->sli4_hba.hba_eq);
6836 phba->sli4_hba.hba_eq = NULL;
6837 }
6838
6839 if (phba->sli4_hba.fcp_cq != NULL) {
6840 /* Release FCP completion queue */
6841 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6842 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
6843 lpfc_sli4_queue_free(
6844 phba->sli4_hba.fcp_cq[idx]);
6845 phba->sli4_hba.fcp_cq[idx] = NULL;
6846 }
6847 }
6848 kfree(phba->sli4_hba.fcp_cq);
6849 phba->sli4_hba.fcp_cq = NULL;
6850 }
6851
6852 if (phba->sli4_hba.fcp_wq != NULL) {
6853 /* Release FCP work queue */
6854 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6855 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
6856 lpfc_sli4_queue_free(
6857 phba->sli4_hba.fcp_wq[idx]);
6858 phba->sli4_hba.fcp_wq[idx] = NULL;
6859 }
6860 }
6861 kfree(phba->sli4_hba.fcp_wq);
6862 phba->sli4_hba.fcp_wq = NULL;
6863 }
6864
6865 /* Release FCP CQ mapping array */
6866 if (phba->sli4_hba.fcp_cq_map != NULL) {
6867 kfree(phba->sli4_hba.fcp_cq_map);
6868 phba->sli4_hba.fcp_cq_map = NULL;
6869 }
6852 6870
6853 /* Release mailbox command work queue */ 6871 /* Release mailbox command work queue */
6854 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); 6872 if (phba->sli4_hba.mbx_wq != NULL) {
6855 phba->sli4_hba.mbx_wq = NULL; 6873 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6874 phba->sli4_hba.mbx_wq = NULL;
6875 }
6856 6876
6857 /* Release ELS work queue */ 6877 /* Release ELS work queue */
6858 lpfc_sli4_queue_free(phba->sli4_hba.els_wq); 6878 if (phba->sli4_hba.els_wq != NULL) {
6859 phba->sli4_hba.els_wq = NULL; 6879 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6860 6880 phba->sli4_hba.els_wq = NULL;
6861 /* Release FCP work queue */ 6881 }
6862 if (phba->sli4_hba.fcp_wq != NULL)
6863 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
6864 fcp_qidx++)
6865 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6866 kfree(phba->sli4_hba.fcp_wq);
6867 phba->sli4_hba.fcp_wq = NULL;
6868 6882
6869 /* Release unsolicited receive queue */ 6883 /* Release unsolicited receive queue */
6870 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); 6884 if (phba->sli4_hba.hdr_rq != NULL) {
6871 phba->sli4_hba.hdr_rq = NULL; 6885 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6872 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); 6886 phba->sli4_hba.hdr_rq = NULL;
6873 phba->sli4_hba.dat_rq = NULL; 6887 }
6888 if (phba->sli4_hba.dat_rq != NULL) {
6889 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6890 phba->sli4_hba.dat_rq = NULL;
6891 }
6874 6892
6875 /* Release ELS complete queue */ 6893 /* Release ELS complete queue */
6876 lpfc_sli4_queue_free(phba->sli4_hba.els_cq); 6894 if (phba->sli4_hba.els_cq != NULL) {
6877 phba->sli4_hba.els_cq = NULL; 6895 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6896 phba->sli4_hba.els_cq = NULL;
6897 }
6878 6898
6879 /* Release mailbox command complete queue */ 6899 /* Release mailbox command complete queue */
6880 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); 6900 if (phba->sli4_hba.mbx_cq != NULL) {
6881 phba->sli4_hba.mbx_cq = NULL; 6901 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6882 6902 phba->sli4_hba.mbx_cq = NULL;
6883 /* Release FCP response complete queue */ 6903 }
6884 fcp_qidx = 0;
6885 if (phba->sli4_hba.fcp_cq != NULL)
6886 do
6887 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6888 while (++fcp_qidx < phba->cfg_fcp_eq_count);
6889 kfree(phba->sli4_hba.fcp_cq);
6890 phba->sli4_hba.fcp_cq = NULL;
6891
6892 /* Release fast-path event queue */
6893 if (phba->sli4_hba.fp_eq != NULL)
6894 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
6895 fcp_qidx++)
6896 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6897 kfree(phba->sli4_hba.fp_eq);
6898 phba->sli4_hba.fp_eq = NULL;
6899
6900 /* Release slow-path event queue */
6901 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6902 phba->sli4_hba.sp_eq = NULL;
6903 6904
6904 return; 6905 return;
6905} 6906}
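
Each release in the new destroy path is guarded and then NULLs the pointer, which is what lets queue_create delegate all of its cleanup here: the function is safe on a partially built or already-destroyed state. The recurring idiom, factored into a hypothetical helper:

	static void release_queue(struct lpfc_queue **qp)
	{
		if (*qp != NULL) {
			lpfc_sli4_queue_free(*qp);	/* driver's queue free    */
			*qp = NULL;			/* idempotent on re-entry */
		}
	}
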
@@ -6919,61 +6920,124 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6919int 6920int
6920lpfc_sli4_queue_setup(struct lpfc_hba *phba) 6921lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6921{ 6922{
6923 struct lpfc_sli *psli = &phba->sli;
6924 struct lpfc_sli_ring *pring;
6922 int rc = -ENOMEM; 6925 int rc = -ENOMEM;
6923 int fcp_eqidx, fcp_cqidx, fcp_wqidx; 6926 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6924 int fcp_cq_index = 0; 6927 int fcp_cq_index = 0;
6925 6928
6926 /* 6929 /*
6927 * Set up Event Queues (EQs) 6930 * Set up HBA Event Queues (EQs)
6928 */ 6931 */
6929 6932
6930 /* Set up slow-path event queue */ 6933 /* Set up HBA event queue */
6931 if (!phba->sli4_hba.sp_eq) { 6934 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
6932 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6933 "0520 Slow-path EQ not allocated\n");
6934 goto out_error;
6935 }
6936 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6937 LPFC_SP_DEF_IMAX);
6938 if (rc) {
6939 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6940 "0521 Failed setup of slow-path EQ: "
6941 "rc = 0x%x\n", rc);
6942 goto out_error;
6943 }
6944 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6945 "2583 Slow-path EQ setup: queue-id=%d\n",
6946 phba->sli4_hba.sp_eq->queue_id);
6947
6948 /* Set up fast-path event queue */
6949 if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
6950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6951 "3147 Fast-path EQs not allocated\n"); 6936 "3147 Fast-path EQs not allocated\n");
6952 rc = -ENOMEM; 6937 rc = -ENOMEM;
6953 goto out_destroy_sp_eq; 6938 goto out_error;
6954 } 6939 }
6955 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6940 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
6956 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { 6941 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
6957 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6942 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6958 "0522 Fast-path EQ (%d) not " 6943 "0522 Fast-path EQ (%d) not "
6959 "allocated\n", fcp_eqidx); 6944 "allocated\n", fcp_eqidx);
6960 rc = -ENOMEM; 6945 rc = -ENOMEM;
6961 goto out_destroy_fp_eq; 6946 goto out_destroy_hba_eq;
6962 } 6947 }
6963 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], 6948 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
6964 phba->cfg_fcp_imax); 6949 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
6965 if (rc) { 6950 if (rc) {
6966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6951 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6967 "0523 Failed setup of fast-path EQ " 6952 "0523 Failed setup of fast-path EQ "
6968 "(%d), rc = 0x%x\n", fcp_eqidx, rc); 6953 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
6969 goto out_destroy_fp_eq; 6954 goto out_destroy_hba_eq;
6970 } 6955 }
6971 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6956 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6972 "2584 Fast-path EQ setup: " 6957 "2584 HBA EQ setup: "
6973 "queue[%d]-id=%d\n", fcp_eqidx, 6958 "queue[%d]-id=%d\n", fcp_eqidx,
6974 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); 6959 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
6975 } 6960 }
6976 6961
6962 /* Set up fast-path FCP Response Complete Queue */
6963 if (!phba->sli4_hba.fcp_cq) {
6964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6965 "3148 Fast-path FCP CQ array not "
6966 "allocated\n");
6967 rc = -ENOMEM;
6968 goto out_destroy_hba_eq;
6969 }
6970
6971 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
6972 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6974 "0526 Fast-path FCP CQ (%d) not "
6975 "allocated\n", fcp_cqidx);
6976 rc = -ENOMEM;
6977 goto out_destroy_fcp_cq;
6978 }
6979 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6980 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
6981 if (rc) {
6982 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6983 "0527 Failed setup of fast-path FCP "
6984 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6985 goto out_destroy_fcp_cq;
6986 }
6987
6988 /* Setup fcp_cq_map for fast lookup */
6989 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
6990 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
6991
6992 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6993 "2588 FCP CQ setup: cq[%d]-id=%d, "
6994 "parent seq[%d]-id=%d\n",
6995 fcp_cqidx,
6996 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6997 fcp_cqidx,
6998 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
6999 }
7000
7001 /* Set up fast-path FCP Work Queue */
7002 if (!phba->sli4_hba.fcp_wq) {
7003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7004 "3149 Fast-path FCP WQ array not "
7005 "allocated\n");
7006 rc = -ENOMEM;
7007 goto out_destroy_fcp_cq;
7008 }
7009
7010 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7011 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7013 "0534 Fast-path FCP WQ (%d) not "
7014 "allocated\n", fcp_wqidx);
7015 rc = -ENOMEM;
7016 goto out_destroy_fcp_wq;
7017 }
7018 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7019 phba->sli4_hba.fcp_cq[fcp_wqidx],
7020 LPFC_FCP);
7021 if (rc) {
7022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7023 "0535 Failed setup of fast-path FCP "
7024 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7025 goto out_destroy_fcp_wq;
7026 }
7027
7028 /* Bind this WQ to the next FCP ring */
7029 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7030 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7031 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7032
7033 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7034 "2591 FCP WQ setup: wq[%d]-id=%d, "
7035 "parent cq[%d]-id=%d\n",
7036 fcp_wqidx,
7037 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7038 fcp_cq_index,
7039 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7040 }
6977 /* 7041 /*
6978 * Set up Complete Queues (CQs) 7042 * Set up Complete Queues (CQs)
6979 */ 7043 */
@@ -6983,20 +7047,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6984 "0528 Mailbox CQ not allocated\n"); 7048 "0528 Mailbox CQ not allocated\n");
6985 rc = -ENOMEM; 7049 rc = -ENOMEM;
6986 goto out_destroy_fp_eq; 7050 goto out_destroy_fcp_wq;
6987 } 7051 }
6988 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, 7052 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
6989 LPFC_MCQ, LPFC_MBOX); 7053 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
6990 if (rc) { 7054 if (rc) {
6991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7055 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6992 "0529 Failed setup of slow-path mailbox CQ: " 7056 "0529 Failed setup of slow-path mailbox CQ: "
6993 "rc = 0x%x\n", rc); 7057 "rc = 0x%x\n", rc);
6994 goto out_destroy_fp_eq; 7058 goto out_destroy_fcp_wq;
6995 } 7059 }
6996 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7060 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6997 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", 7061 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6998 phba->sli4_hba.mbx_cq->queue_id, 7062 phba->sli4_hba.mbx_cq->queue_id,
6999 phba->sli4_hba.sp_eq->queue_id); 7063 phba->sli4_hba.hba_eq[0]->queue_id);
7000 7064
7001 /* Set up slow-path ELS Complete Queue */ 7065 /* Set up slow-path ELS Complete Queue */
7002 if (!phba->sli4_hba.els_cq) { 7066 if (!phba->sli4_hba.els_cq) {
@@ -7005,8 +7069,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7005 rc = -ENOMEM; 7069 rc = -ENOMEM;
7006 goto out_destroy_mbx_cq; 7070 goto out_destroy_mbx_cq;
7007 } 7071 }
7008 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, 7072 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7009 LPFC_WCQ, LPFC_ELS); 7073 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7010 if (rc) { 7074 if (rc) {
7011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7012 "0531 Failed setup of slow-path ELS CQ: " 7076 "0531 Failed setup of slow-path ELS CQ: "
@@ -7016,52 +7080,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7080 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7017 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", 7081 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7018 phba->sli4_hba.els_cq->queue_id, 7082 phba->sli4_hba.els_cq->queue_id,
7019 phba->sli4_hba.sp_eq->queue_id); 7083 phba->sli4_hba.hba_eq[0]->queue_id);
7020
7021 /* Set up fast-path FCP Response Complete Queue */
7022 if (!phba->sli4_hba.fcp_cq) {
7023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7024 "3148 Fast-path FCP CQ array not "
7025 "allocated\n");
7026 rc = -ENOMEM;
7027 goto out_destroy_els_cq;
7028 }
7029 fcp_cqidx = 0;
7030 do {
7031 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7033 "0526 Fast-path FCP CQ (%d) not "
7034 "allocated\n", fcp_cqidx);
7035 rc = -ENOMEM;
7036 goto out_destroy_fcp_cq;
7037 }
7038 if (phba->cfg_fcp_eq_count)
7039 rc = lpfc_cq_create(phba,
7040 phba->sli4_hba.fcp_cq[fcp_cqidx],
7041 phba->sli4_hba.fp_eq[fcp_cqidx],
7042 LPFC_WCQ, LPFC_FCP);
7043 else
7044 rc = lpfc_cq_create(phba,
7045 phba->sli4_hba.fcp_cq[fcp_cqidx],
7046 phba->sli4_hba.sp_eq,
7047 LPFC_WCQ, LPFC_FCP);
7048 if (rc) {
7049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7050 "0527 Failed setup of fast-path FCP "
7051 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7052 goto out_destroy_fcp_cq;
7053 }
7054 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7055 "2588 FCP CQ setup: cq[%d]-id=%d, "
7056 "parent %seq[%d]-id=%d\n",
7057 fcp_cqidx,
7058 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7059 (phba->cfg_fcp_eq_count) ? "" : "sp_",
7060 fcp_cqidx,
7061 (phba->cfg_fcp_eq_count) ?
7062 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id :
7063 phba->sli4_hba.sp_eq->queue_id);
7064 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
7065 7084
7066 /* 7085 /*
7067 * Set up all the Work Queues (WQs) 7086 * Set up all the Work Queues (WQs)
@@ -7072,7 +7091,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7073 "0538 Slow-path MQ not allocated\n"); 7092 "0538 Slow-path MQ not allocated\n");
7074 rc = -ENOMEM; 7093 rc = -ENOMEM;
7075 goto out_destroy_fcp_cq; 7094 goto out_destroy_els_cq;
7076 } 7095 }
7077 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, 7096 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7078 phba->sli4_hba.mbx_cq, LPFC_MBOX); 7097 phba->sli4_hba.mbx_cq, LPFC_MBOX);
@@ -7080,7 +7099,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7080 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7081 "0539 Failed setup of slow-path MQ: " 7100 "0539 Failed setup of slow-path MQ: "
7082 "rc = 0x%x\n", rc); 7101 "rc = 0x%x\n", rc);
7083 goto out_destroy_fcp_cq; 7102 goto out_destroy_els_cq;
7084 } 7103 }
7085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7086 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", 7105 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
@@ -7102,49 +7121,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7102 "rc = 0x%x\n", rc); 7121 "rc = 0x%x\n", rc);
7103 goto out_destroy_mbx_wq; 7122 goto out_destroy_mbx_wq;
7104 } 7123 }
7124
7125 /* Bind this WQ to the ELS ring */
7126 pring = &psli->ring[LPFC_ELS_RING];
7127 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7128 phba->sli4_hba.els_cq->pring = pring;
7129
7105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7130 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7106 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", 7131 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7107 phba->sli4_hba.els_wq->queue_id, 7132 phba->sli4_hba.els_wq->queue_id,
7108 phba->sli4_hba.els_cq->queue_id); 7133 phba->sli4_hba.els_cq->queue_id);
7109 7134
7110 /* Set up fast-path FCP Work Queue */
7111 if (!phba->sli4_hba.fcp_wq) {
7112 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7113 "3149 Fast-path FCP WQ array not "
7114 "allocated\n");
7115 rc = -ENOMEM;
7116 goto out_destroy_els_wq;
7117 }
7118 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
7119 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7121 "0534 Fast-path FCP WQ (%d) not "
7122 "allocated\n", fcp_wqidx);
7123 rc = -ENOMEM;
7124 goto out_destroy_fcp_wq;
7125 }
7126 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7127 phba->sli4_hba.fcp_cq[fcp_cq_index],
7128 LPFC_FCP);
7129 if (rc) {
7130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7131 "0535 Failed setup of fast-path FCP "
7132 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7133 goto out_destroy_fcp_wq;
7134 }
7135 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7136 "2591 FCP WQ setup: wq[%d]-id=%d, "
7137 "parent cq[%d]-id=%d\n",
7138 fcp_wqidx,
7139 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
7140 fcp_cq_index,
7141 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
7142 /* Round robin FCP Work Queue's Completion Queue assignment */
7143 if (phba->cfg_fcp_eq_count)
7144 fcp_cq_index = ((fcp_cq_index + 1) %
7145 phba->cfg_fcp_eq_count);
7146 }
7147
7148 /* 7135 /*
7149 * Create Receive Queue (RQ) 7136 * Create Receive Queue (RQ)
7150 */ 7137 */
@@ -7152,7 +7139,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7153 "0540 Receive Queue not allocated\n"); 7140 "0540 Receive Queue not allocated\n");
7154 rc = -ENOMEM; 7141 rc = -ENOMEM;
7155 goto out_destroy_fcp_wq; 7142 goto out_destroy_els_wq;
7156 } 7143 }
7157 7144
7158 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); 7145 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7175,25 +7162,23 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7175 phba->sli4_hba.els_cq->queue_id); 7162 phba->sli4_hba.els_cq->queue_id);
7176 return 0; 7163 return 0;
7177 7164
7178out_destroy_fcp_wq:
7179 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7180 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7181out_destroy_els_wq: 7165out_destroy_els_wq:
7182 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); 7166 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7183out_destroy_mbx_wq: 7167out_destroy_mbx_wq:
7184 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); 7168 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7185out_destroy_fcp_cq:
7186 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7187 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7188out_destroy_els_cq: 7169out_destroy_els_cq:
7189 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7170 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7190out_destroy_mbx_cq: 7171out_destroy_mbx_cq:
7191 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7172 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7192out_destroy_fp_eq: 7173out_destroy_fcp_wq:
7174 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7175 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7176out_destroy_fcp_cq:
7177 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7178 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7179out_destroy_hba_eq:
7193 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) 7180 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7194 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); 7181 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7195out_destroy_sp_eq:
7196 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7197out_error: 7182out_error:
7198 return rc; 7183 return rc;
7199} 7184}
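
The reordered labels tear queues down strictly in reverse creation order, and each indexed unwind opens with a pre-decrement so the entry whose creation failed is skipped. The loop idiom in isolation (destroy() stands in for the lpfc_*_destroy calls):

	static void unwind(void **objs, int idx)
	{
		/* '--idx' skips the entry that was never created, then
		 * entries 0..idx-1 are destroyed newest-first. */
		for (--idx; idx >= 0; idx--)
			destroy(objs[idx]);
	}
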
@@ -7222,27 +7207,27 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7222 /* Unset unsolicited receive queue */ 7207 /* Unset unsolicited receive queue */
7223 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); 7208 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7224 /* Unset FCP work queue */ 7209 /* Unset FCP work queue */
7225 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) 7210 if (phba->sli4_hba.fcp_wq) {
7226 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); 7211 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7212 fcp_qidx++)
7213 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7214 }
7227 /* Unset mailbox command complete queue */ 7215 /* Unset mailbox command complete queue */
7228 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); 7216 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7229 /* Unset ELS complete queue */ 7217 /* Unset ELS complete queue */
7230 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 7218 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7231 /* Unset FCP response complete queue */ 7219 /* Unset FCP response complete queue */
7232 if (phba->sli4_hba.fcp_cq) { 7220 if (phba->sli4_hba.fcp_cq) {
7233 fcp_qidx = 0; 7221 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7234 do { 7222 fcp_qidx++)
7235 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 7223 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7236 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
7237 } 7224 }
7238 /* Unset fast-path event queue */ 7225 /* Unset fast-path event queue */
7239 if (phba->sli4_hba.fp_eq) { 7226 if (phba->sli4_hba.hba_eq) {
7240 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; 7227 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7241 fcp_qidx++) 7228 fcp_qidx++)
7242 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 7229 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7243 } 7230 }
7244 /* Unset slow-path event queue */
7245 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
7246} 7231}
7247 7232
7248/** 7233/**
@@ -7590,10 +7575,11 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7590 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ 7575 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
7591 length = (sizeof(struct lpfc_mbx_nop) - 7576 length = (sizeof(struct lpfc_mbx_nop) -
7592 sizeof(struct lpfc_sli4_cfg_mhdr)); 7577 sizeof(struct lpfc_sli4_cfg_mhdr));
7593 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7594 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7595 7578
7596 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 7579 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7580 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7581 LPFC_MBOX_OPCODE_NOP, length,
7582 LPFC_SLI4_MBX_EMBED);
7597 if (!phba->sli4_hba.intr_enable) 7583 if (!phba->sli4_hba.intr_enable)
7598 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7584 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7599 else { 7585 else {
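
Moving lpfc_sli4_config() inside the loop rebuilds the NOP command before every issue; with the setup hoisted outside, only the first iteration was guaranteed a freshly initialized mailbox, since each send can overwrite the object with completion state. In miniature (init_cmd()/issue() are stand-ins):

	for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
		init_cmd(mboxq);	/* rebuild the command each pass */
		if (issue(mboxq))
			break;
	}
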
@@ -8133,33 +8119,22 @@ enable_msix_vectors:
8133 "message=%d\n", index, 8119 "message=%d\n", index,
8134 phba->sli4_hba.msix_entries[index].vector, 8120 phba->sli4_hba.msix_entries[index].vector,
8135 phba->sli4_hba.msix_entries[index].entry); 8121 phba->sli4_hba.msix_entries[index].entry);
8122
8136 /* 8123 /*
8137 * Assign MSI-X vectors to interrupt handlers 8124 * Assign MSI-X vectors to interrupt handlers
8138 */ 8125 */
8139 if (vectors > 1) 8126 for (index = 0; index < vectors; index++) {
8140 rc = request_irq(phba->sli4_hba.msix_entries[0].vector, 8127 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8141 &lpfc_sli4_sp_intr_handler, IRQF_SHARED, 8128 sprintf((char *)&phba->sli4_hba.handler_name[index],
8142 LPFC_SP_DRIVER_HANDLER_NAME, phba); 8129 LPFC_DRIVER_HANDLER_NAME"%d", index);
8143 else
8144 /* All Interrupts need to be handled by one EQ */
8145 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
8146 &lpfc_sli4_intr_handler, IRQF_SHARED,
8147 LPFC_DRIVER_NAME, phba);
8148 if (rc) {
8149 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8150 "0485 MSI-X slow-path request_irq failed "
8151 "(%d)\n", rc);
8152 goto msi_fail_out;
8153 }
8154 8130
8155 /* The rest of the vector(s) are associated to fast-path handler(s) */ 8131 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8156 for (index = 1; index < vectors; index++) { 8132 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8157 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; 8133 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8158 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
8159 rc = request_irq(phba->sli4_hba.msix_entries[index].vector, 8134 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8160 &lpfc_sli4_fp_intr_handler, IRQF_SHARED, 8135 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8161 LPFC_FP_DRIVER_HANDLER_NAME, 8136 (char *)&phba->sli4_hba.handler_name[index],
8162 &phba->sli4_hba.fcp_eq_hdl[index - 1]); 8137 &phba->sli4_hba.fcp_eq_hdl[index]);
8163 if (rc) { 8138 if (rc) {
8164 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 8139 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8165 "0486 MSI-X fast-path (%d) " 8140 "0486 MSI-X fast-path (%d) "
@@ -8173,12 +8148,9 @@ enable_msix_vectors:
 
 cfg_fail_out:
 	/* free the irq already requested */
-	for (--index; index >= 1; index--)
-		free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
-			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
-	/* free the irq already requested */
-	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+	for (--index; index >= 0; index--)
+		free_irq(phba->sli4_hba.msix_entries[index].vector,
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
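With one uniform handler per vector, a single reverse loop suffices to unwind on failure. A stand-alone sketch of the acquire/unwind idiom used by cfg_fail_out (acquire() and release() are hypothetical stand-ins):

#include <stdio.h>

#define NVEC 4

static int acquire(int i) { return i != 2; /* fail at 2 for the demo */ }
static void release(int i) { printf("release %d\n", i); }

int main(void)
{
	int index;

	for (index = 0; index < NVEC; index++)
		if (!acquire(index))
			goto fail;
	return 0;

fail:
	/* release only what was acquired, newest first */
	for (--index; index >= 0; index--)
		release(index);
	return 1;
}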
@@ -8199,11 +8171,9 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
-	for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
+	for (index = 0; index < phba->sli4_hba.msix_vec_nr; index++)
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
-			 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+			 &phba->sli4_hba.fcp_eq_hdl[index]);
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
@@ -8249,7 +8219,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
 		return rc;
 	}
 
-	for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
 	}
@@ -8329,10 +8299,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
 			/* Indicate initialization to INTx mode */
 			phba->intr_type = INTx;
 			intr_mode = 0;
-			for (index = 0; index < phba->cfg_fcp_eq_count;
+			for (index = 0; index < phba->cfg_fcp_io_channel;
 			     index++) {
 				phba->sli4_hba.fcp_eq_hdl[index].idx = index;
 				phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
+					fcp_eq_in_use, 1);
 			}
 		}
 	}
@@ -9449,7 +9421,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	int error;
 	uint32_t cfg_mode, intr_mode;
 	int mcnt;
-	int adjusted_fcp_eq_count;
+	int adjusted_fcp_io_channel;
 	const struct firmware *fw;
 	uint8_t file_name[16];
 
@@ -9552,13 +9524,13 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	}
 	/* Default to single EQ for non-MSI-X */
 	if (phba->intr_type != MSIX)
-		adjusted_fcp_eq_count = 0;
+		adjusted_fcp_io_channel = 1;
 	else if (phba->sli4_hba.msix_vec_nr <
-	    phba->cfg_fcp_eq_count + 1)
-		adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
+	    phba->cfg_fcp_io_channel)
+		adjusted_fcp_io_channel = phba->sli4_hba.msix_vec_nr;
 	else
-		adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
-	phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
+		adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+	phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
 	/* Set up SLI-4 HBA */
 	if (lpfc_sli4_hba_setup(phba)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -9694,6 +9666,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 	 * buffers are released to their corresponding pools here.
 	 */
 	lpfc_scsi_free(phba);
+
 	lpfc_sli4_driver_resource_unset(phba);
 
 	/* Unmap adapter Control and Doorbell registers */
@@ -10420,6 +10393,10 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{ 0 }
 };
 
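The new entries use PCI_ANY_ID wildcards so any subsystem IDs of the function match. A simplified sketch of how such a table entry matches a device (the 0x10df/0x0720 vendor/device values are assumptions for illustration, not taken from this patch):

#include <stdio.h>
#include <stdint.h>

#define PCI_ANY_ID ((uint32_t)~0)

struct pci_id { uint32_t vendor, device, subvendor, subdevice; };

static int id_match(const struct pci_id *id, const struct pci_id *dev)
{
	return (id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
	       (id->device == PCI_ANY_ID || id->device == dev->device) &&
	       (id->subvendor == PCI_ANY_ID ||
		id->subvendor == dev->subvendor) &&
	       (id->subdevice == PCI_ANY_ID ||
		id->subdevice == dev->subdevice);
}

int main(void)
{
	struct pci_id entry = { 0x10df, 0x0720, PCI_ANY_ID, PCI_ANY_ID };
	struct pci_id fn = { 0x10df, 0x0720, 0x10df, 0x0001 };

	printf("match=%d\n", id_match(&entry, &fn));
	return 0;
}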
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 20336f09fb3..efc9cd9def8 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -92,7 +92,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
 	memset(mp->virt, 0, LPFC_BPL_SIZE);
 	INIT_LIST_HEAD(&mp->list);
 	/* save address for completion */
-	pmb->context2 = (uint8_t *) mp;
+	pmb->context1 = (uint8_t *)mp;
 	mb->un.varWords[3] = putPaddrLow(mp->phys);
 	mb->un.varWords[4] = putPaddrHigh(mp->phys);
 	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -950,44 +950,47 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 
-		pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
+		pring->sli.sli3.sizeCiocb =
+			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
 							SLI2_IOCB_CMD_SIZE;
-		pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
+		pring->sli.sli3.sizeRiocb =
+			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
 							SLI2_IOCB_RSP_SIZE;
 		/* A ring MUST have both cmd and rsp entries defined to be
 		   valid */
-		if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
+		if ((pring->sli.sli3.numCiocb == 0) ||
+			(pring->sli.sli3.numRiocb == 0)) {
 			pcbp->rdsc[i].cmdEntries = 0;
 			pcbp->rdsc[i].rspEntries = 0;
 			pcbp->rdsc[i].cmdAddrHigh = 0;
 			pcbp->rdsc[i].rspAddrHigh = 0;
 			pcbp->rdsc[i].cmdAddrLow = 0;
 			pcbp->rdsc[i].rspAddrLow = 0;
-			pring->cmdringaddr = NULL;
-			pring->rspringaddr = NULL;
+			pring->sli.sli3.cmdringaddr = NULL;
+			pring->sli.sli3.rspringaddr = NULL;
 			continue;
 		}
 		/* Command ring setup for ring */
-		pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
-		pcbp->rdsc[i].cmdEntries = pring->numCiocb;
+		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
+		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
 
 		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
 			 (uint8_t *) phba->slim2p.virt;
 		pdma_addr = phba->slim2p.phys + offset;
 		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
 		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
-		iocbCnt += pring->numCiocb;
+		iocbCnt += pring->sli.sli3.numCiocb;
 
 		/* Response ring setup for ring */
-		pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];
+		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
 
-		pcbp->rdsc[i].rspEntries = pring->numRiocb;
+		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
 		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
 			 (uint8_t *)phba->slim2p.virt;
 		pdma_addr = phba->slim2p.phys + offset;
 		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
 		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
-		iocbCnt += pring->numRiocb;
+		iocbCnt += pring->sli.sli3.numRiocb;
 	}
 }
 
@@ -1609,12 +1612,15 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 
 	switch (mbox->mbxCommand) {
 	case MBX_WRITE_NV:	/* 0x03 */
+	case MBX_DUMP_MEMORY:	/* 0x17 */
 	case MBX_UPDATE_CFG:	/* 0x1B */
 	case MBX_DOWN_LOAD:	/* 0x1C */
 	case MBX_DEL_LD_ENTRY:	/* 0x1D */
+	case MBX_WRITE_VPARMS:	/* 0x32 */
 	case MBX_LOAD_AREA:	/* 0x81 */
 	case MBX_WRITE_WWN:	/* 0x98 */
 	case MBX_LOAD_EXP_ROM:	/* 0x9C */
+	case MBX_ACCESS_VDATA:	/* 0xA5 */
 		return LPFC_MBOX_TMO_FLASH_CMD;
 	case MBX_SLI4_CONFIG:	/* 0x9b */
 		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
@@ -1625,11 +1631,17 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 		case LPFC_MBOX_OPCODE_WRITE_OBJECT:
 		case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
 		case LPFC_MBOX_OPCODE_DELETE_OBJECT:
-		case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
 		case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
 		case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
+		case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
 		case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
 		case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
+		case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
+		case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
+		case LPFC_MBOX_OPCODE_RESET_LICENSES:
+		case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
+		case LPFC_MBOX_OPCODE_GET_VPD_DATA:
+		case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
 			return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
 		}
 	}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ade763d3930..cd86069a0ba 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
@@ -194,6 +194,10 @@ lpfc_mem_free(struct lpfc_hba *phba)
 	pci_pool_destroy(phba->lpfc_hbq_pool);
 	phba->lpfc_hbq_pool = NULL;
 
+	if (phba->rrq_pool)
+		mempool_destroy(phba->rrq_pool);
+	phba->rrq_pool = NULL;
+
 	/* Free NLP memory pool */
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 9133a97f045..d8fadcb2db7 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1778,6 +1778,117 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
 }
 
 static uint32_t
+lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= NLP_LOGO_ACC;
+	spin_unlock_irq(shost->host_lock);
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			   void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			 void *arg, uint32_t evt)
+{
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
+	struct ls_rjt stat;
+
+	memset(&stat, 0, sizeof(struct ls_rjt));
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
+	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+	spin_lock_irq(shost->host_lock);
+	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+	spin_unlock_irq(shost->host_lock);
+	lpfc_disc_set_adisc(vport, ndlp);
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+			  void *arg, uint32_t evt)
+{
+	/*
+	 * Take no action. If a LOGO is outstanding, then possibly DevLoss has
+	 * timed out and is calling for Device Remove. In this case, the LOGO
+	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
+	 * and other NLP flags are correctly cleaned up.
+	 */
+	return ndlp->nlp_state;
+}
+
+static uint32_t
+lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
+			     struct lpfc_nodelist *ndlp,
+			     void *arg, uint32_t evt)
+{
+	/*
+	 * Device Recovery events have no meaning for a node with a LOGO
+	 * outstanding. The LOGO has to complete first and handle the
+	 * node from that point.
+	 */
+	return ndlp->nlp_state;
+}
+
+static uint32_t
 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			  void *arg, uint32_t evt)
 {
@@ -2083,6 +2194,8 @@ lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			 void *arg, uint32_t evt)
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	/* For the fabric port just clear the fc flags. */
 	if (ndlp->nlp_DID == Fabric_DID) {
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -2297,6 +2410,20 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
 
+	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
+	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
+	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
+	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
+	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
+	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
+	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
+	lpfc_disc_illegal,		/* CMPL_PRLI       */
+	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
+	lpfc_disc_illegal,		/* CMPL_ADISC      */
+	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
+	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
+	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
+
 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
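The thirteen new entries form one complete row of the flat state-by-event dispatch table, one handler per node event for the new LOGO_ISSUE state. A hedged sketch of how such a flat table is typically indexed (the handler names and the 3x2 shape are illustrative, not the lpfc values):

#include <stdio.h>

#define NUM_STATES 3
#define NUM_EVENTS 2

typedef unsigned int (*action_fn)(void);

static unsigned int act_ok(void)     { return 1; }
static unsigned int act_reject(void) { return 0; }

/* one row of NUM_EVENTS entries per state */
static action_fn action[NUM_STATES * NUM_EVENTS] = {
	act_ok,     act_reject,	/* state 0 */
	act_ok,     act_ok,	/* state 1 */
	act_reject, act_ok,	/* state 2 */
};

int main(void)
{
	unsigned int state = 1, evt = 0;

	/* flat index: row = state, column = event */
	printf("action returned %u\n", action[state * NUM_EVENTS + evt]());
	return 0;
}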
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 925975d2d76..64013f3097a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -60,12 +60,6 @@ static char *dif_op_str[] = {
 	"PROT_WRITE_PASS",
 };
 
-static char *dif_grd_str[] = {
-	"NO_GUARD",
-	"DIF_CRC",
-	"DIX_IP",
-};
-
 struct scsi_dif_tuple {
 	__be16 guard_tag;       /* Checksum */
 	__be16 app_tag;         /* Opaque storage */
@@ -3482,9 +3476,15 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	}
 	lp = (uint32_t *)cmnd->sense_buffer;
 
-	if (!scsi_status && (resp_info & RESID_UNDER) &&
-		vport->cfg_log_verbose & LOG_FCP_UNDER)
-		logit = LOG_FCP_UNDER;
+	/* special handling for under run conditions */
+	if (!scsi_status && (resp_info & RESID_UNDER)) {
+		/* don't log under runs if fcp set... */
+		if (vport->cfg_log_verbose & LOG_FCP)
+			logit = LOG_FCP_ERROR;
+		/* unless operator says so */
+		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
+			logit = LOG_FCP_UNDER;
+	}
 
 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
@@ -3552,11 +3552,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
 	/*
 	 * Check SLI validation that all the transfer was actually done
-	 * (fcpi_parm should be zero). Apply check only to reads.
+	 * (fcpi_parm should be zero).
 	 */
-	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
+	} else if (fcpi_parm) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-				 "9029 FCP Read Check Error Data: "
+				 "9029 FCP Data Transfer Check Error: "
 				 "x%x x%x x%x x%x x%x\n",
 				 be32_to_cpu(fcpcmd->fcpDl),
 				 be32_to_cpu(fcprsp->rspResId),
@@ -3615,7 +3615,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	cmd = lpfc_cmd->pCmd;
 	shost = cmd->device->host;
 
-	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
+	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
 	/* pick up SLI4 exhange busy status from HBA */
 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
@@ -3660,10 +3660,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
 	else if (lpfc_cmd->status >= IOSTAT_CNT)
 		lpfc_cmd->status = IOSTAT_DEFAULT;
-	if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
-		&& !lpfc_cmd->fcp_rsp->rspStatus3
-		&& (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
-		&& !(phba->cfg_log_verbose & LOG_FCP_UNDER))
+	if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
+	    !lpfc_cmd->fcp_rsp->rspStatus3 &&
+	    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
+	    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
 		logit = 0;
 	else
 		logit = LOG_FCP | LOG_FCP_UNDER;
@@ -3829,12 +3829,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
+		spin_lock_irq(&phba->hbalock);
+		lpfc_cmd->pCmd = NULL;
+		spin_unlock_irq(&phba->hbalock);
+
 		/*
 		 * If there is a thread waiting for command completion
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(shost->host_lock, flags);
-		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3868,12 +3871,15 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		}
 	}
 
+	spin_lock_irq(&phba->hbalock);
+	lpfc_cmd->pCmd = NULL;
+	spin_unlock_irq(&phba->hbalock);
+
 	/*
 	 * If there is a thread waiting for command completion
 	 * wake up the thread.
 	 */
 	spin_lock_irqsave(shost->host_lock, flags);
-	lpfc_cmd->pCmd = NULL;
 	if (lpfc_cmd->waitq)
 		wake_up(lpfc_cmd->waitq);
 	spin_unlock_irqrestore(shost->host_lock, flags);
@@ -3919,6 +3925,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
 	int datadir = scsi_cmnd->sc_data_direction;
 	char tag[2];
+	uint8_t *ptr;
+	bool sli4;
 
 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return;
@@ -3930,8 +3938,13 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
 			&lpfc_cmd->fcp_cmnd->fcp_lun);
 
-	memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
-	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+	ptr = &fcp_cmnd->fcpCdb[0];
+	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
+		ptr += scsi_cmnd->cmd_len;
+		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
+	}
+
 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
 		switch (tag[0]) {
 		case HEAD_OF_QUEUE_TAG:
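The reworked CDB setup copies the command first and zeroes only the unused tail, rather than clearing all LPFC_FCP_CDB_LEN bytes before copying. A stand-alone sketch of the copy-then-pad pattern:

#include <stdio.h>
#include <string.h>

#define CDB_LEN 16

int main(void)
{
	unsigned char cdb[CDB_LEN];
	unsigned char cmnd[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY */
	size_t cmd_len = sizeof(cmnd);
	int i;

	memcpy(cdb, cmnd, cmd_len);
	if (cmd_len < CDB_LEN)		/* pad only the remainder */
		memset(cdb + cmd_len, 0, CDB_LEN - cmd_len);

	for (i = 0; i < CDB_LEN; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}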
@@ -3947,6 +3960,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	} else
 		fcp_cmnd->fcpCntl1 = 0;
 
+	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
+
 	/*
 	 * There are three possibilities here - use scatter-gather segment, use
 	 * the single mapping, or neither.  Start the lpfc command prep by
@@ -3956,11 +3971,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	if (scsi_sg_count(scsi_cmnd)) {
 		if (datadir == DMA_TO_DEVICE) {
 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-			if (phba->sli_rev < LPFC_SLI_REV4) {
+			if (sli4)
+				iocb_cmd->ulpPU = PARM_READ_CHECK;
+			else {
 				iocb_cmd->un.fcpi.fcpi_parm = 0;
 				iocb_cmd->ulpPU = 0;
-			} else
-				iocb_cmd->ulpPU = PARM_READ_CHECK;
+			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
 			phba->fc4OutputRequests++;
 		} else {
@@ -3984,7 +4000,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	 * of the scsi_cmnd request_buffer
 	 */
 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
-	if (phba->sli_rev == LPFC_SLI_REV4)
+	if (sli4)
 		piocbq->iocb.ulpContext =
 			phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
@@ -4241,9 +4257,8 @@ void lpfc_poll_timeout(unsigned long ptr)
  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
  **/
 static int
-lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
+lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 {
-	struct Scsi_Host  *shost = cmnd->device->host;
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -4299,53 +4314,28 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	lpfc_cmd->timeout = 0;
 	lpfc_cmd->start_time = jiffies;
 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
-	cmnd->scsi_done = done;
 
 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
 		if (vport->phba->cfg_enable_bg) {
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-				"9033 BLKGRD: rcvd protected cmd:%02x op=%s "
-				"guard=%s\n", cmnd->cmnd[0],
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+					 "9033 BLKGRD: rcvd %s cmd:x%x "
+					 "sector x%llx cnt %u pt %x\n",
 					 dif_op_str[scsi_get_prot_op(cmnd)],
-				dif_grd_str[scsi_host_get_guard(shost)]);
-			if (cmnd->cmnd[0] == READ_10)
-				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9035 BLKGRD: READ @ sector %llu, "
-					"cnt %u, rpt %d\n",
-					(unsigned long long)scsi_get_lba(cmnd),
-					blk_rq_sectors(cmnd->request),
-					(cmnd->cmnd[1]>>5));
-			else if (cmnd->cmnd[0] == WRITE_10)
-				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9036 BLKGRD: WRITE @ sector %llu, "
-					"cnt %u, wpt %d\n",
-					(unsigned long long)scsi_get_lba(cmnd),
-					blk_rq_sectors(cmnd->request),
-					(cmnd->cmnd[1]>>5));
+					 cmnd->cmnd[0],
+					 (unsigned long long)scsi_get_lba(cmnd),
+					 blk_rq_sectors(cmnd->request),
+					 (cmnd->cmnd[1]>>5));
 		}
-
 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
 	} else {
 		if (vport->phba->cfg_enable_bg) {
-			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9038 BLKGRD: rcvd unprotected cmd:"
-					"%02x op=%s guard=%s\n", cmnd->cmnd[0],
-					dif_op_str[scsi_get_prot_op(cmnd)],
-					dif_grd_str[scsi_host_get_guard(shost)]);
-			if (cmnd->cmnd[0] == READ_10)
-				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9040 dbg: READ @ sector %llu, "
-					"cnt %u, rpt %d\n",
-					(unsigned long long)scsi_get_lba(cmnd),
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
+					 "x%x sector x%llx cnt %u pt %x\n",
+					 cmnd->cmnd[0],
+					 (unsigned long long)scsi_get_lba(cmnd),
 					 blk_rq_sectors(cmnd->request),
 					 (cmnd->cmnd[1]>>5));
-			else if (cmnd->cmnd[0] == WRITE_10)
-				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
-					"9041 dbg: WRITE @ sector %llu, "
-					"cnt %u, wpt %d\n",
-					(unsigned long long)scsi_get_lba(cmnd),
-					blk_rq_sectors(cmnd->request),
-					(cmnd->cmnd[1]>>5));
 		}
 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 	}
@@ -4363,11 +4353,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_host_busy_free_buf;
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
-		spin_unlock(shost->host_lock);
 		lpfc_sli_handle_fast_ring_event(phba,
 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
-		spin_lock(shost->host_lock);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
@@ -4384,11 +4372,10 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
  out_fail_command:
-	done(cmnd);
+	cmnd->scsi_done(cmnd);
 	return 0;
 }
 
-static DEF_SCSI_QCMD(lpfc_queuecommand)
 
 /**
  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
@@ -4414,7 +4401,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 
 	status = fc_block_scsi_eh(cmnd);
-	if (status)
+	if (status != 0 && status != SUCCESS)
 		return status;
 
 	spin_lock_irq(&phba->hbalock);
@@ -4428,7 +4415,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	}
 
 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
-	if (!lpfc_cmd) {
+	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
 		spin_unlock_irq(&phba->hbalock);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
@@ -4521,9 +4508,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		ret = FAILED;
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0748 abort handler timed out waiting "
-				 "for abort to complete: ret %#x, ID %d, "
-				 "LUN %d\n",
-				 ret, cmnd->device->id, cmnd->device->lun);
+				 "for abortng I/O (xri:x%x) to complete: "
+				 "ret %#x, ID %d, LUN %d\n",
+				 iocb->sli4_xritag, ret,
+				 cmnd->device->id, cmnd->device->lun);
 	}
 	goto out;
 
@@ -4769,7 +4757,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	}
 	pnode = rdata->pnode;
 	status = fc_block_scsi_eh(cmnd);
-	if (status)
+	if (status != 0 && status != SUCCESS)
 		return status;
 
 	status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4836,7 +4824,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 	}
 	pnode = rdata->pnode;
 	status = fc_block_scsi_eh(cmnd);
-	if (status)
+	if (status != 0 && status != SUCCESS)
 		return status;
 
 	status = lpfc_chk_tgt_mapped(vport, cmnd);
@@ -4904,7 +4892,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
 
 	status = fc_block_scsi_eh(cmnd);
-	if (status)
+	if (status != 0 && status != SUCCESS)
 		return status;
 
 	/*
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0e7e144507b..219bf534ef9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -69,6 +69,8 @@ static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
 			      struct lpfc_cqe *);
 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
 				       int);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
+			uint32_t);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -94,6 +96,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	union lpfc_wqe *temp_wqe;
 	struct lpfc_register doorbell;
 	uint32_t host_index;
+	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -101,8 +104,12 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	temp_wqe = q->qe[q->host_index].wqe;
 
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+	idx = ((q->host_index + 1) % q->entry_count);
+	if (idx == q->hba_index) {
+		q->WQ_overflow++;
 		return -ENOMEM;
+	}
+	q->WQ_posted++;
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % q->entry_repost))
 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
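Hoisting `idx` makes the classic circular-queue full test explicit: the queue is full when advancing the producer index would land on the consumer index, which by design leaves one slot unused. A stand-alone sketch:

#include <stdio.h>

#define ENTRY_COUNT 8

/* full when the next producer slot would collide with the consumer */
static int ring_full(unsigned int host_index, unsigned int hba_index)
{
	return ((host_index + 1) % ENTRY_COUNT) == hba_index;
}

int main(void)
{
	printf("%d\n", ring_full(6, 7));	/* 1: one slot behind */
	printf("%d\n", ring_full(2, 7));	/* 0: room available */
	return 0;
}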
@@ -112,7 +119,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 
 	/* Update the host index before invoking device */
 	host_index = q->host_index;
-	q->host_index = ((q->host_index + 1) % q->entry_count);
+
+	q->host_index = idx;
 
 	/* Ring Doorbell */
 	doorbell.word0 = 0;
@@ -120,7 +128,6 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
 	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
 	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
-	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
 
 	return 0;
 }
@@ -194,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
 	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
-	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
 	return 0;
 }
 
@@ -234,6 +240,7 @@ static struct lpfc_eqe *
 lpfc_sli4_eq_get(struct lpfc_queue *q)
 {
 	struct lpfc_eqe *eqe;
+	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -244,14 +251,34 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
 	if (!bf_get_le32(lpfc_eqe_valid, eqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+	idx = ((q->hba_index + 1) % q->entry_count);
+	if (idx == q->host_index)
 		return NULL;
 
-	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	q->hba_index = idx;
 	return eqe;
 }
 
 /**
+ * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
+ * @q: The Event Queue to disable interrupts
+ *
+ **/
+static inline void
+lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
+{
+	struct lpfc_register doorbell;
+
+	doorbell.word0 = 0;
+	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+}
+
+/**
  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
  * @q: The Event Queue that the host has completed processing for.
  * @arm: Indicates whether the host wants to arms this CQ.
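The new helper quiesces an EQ by composing a doorbell word from bit-fields and writing it once. A simplified sketch of that compose-and-write style (the shifts and masks are invented for illustration and do not reflect the real EQCQ doorbell layout):

#include <stdio.h>
#include <stdint.h>

#define QT_SHIFT   10
#define EQCI_SHIFT 9
#define QID_MASK   0x1ff

int main(void)
{
	uint32_t word0 = 0;
	uint32_t queue_id = 0x42, qt_event = 1, eqci = 1;

	word0 |= eqci << EQCI_SHIFT;		/* clear-interrupt bit */
	word0 |= qt_event << QT_SHIFT;		/* queue type: event */
	word0 |= queue_id & QID_MASK;		/* which queue */

	printf("doorbell word0 = 0x%08x\n", word0);
	return 0;
}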
@@ -318,6 +345,7 @@ static struct lpfc_cqe *
 lpfc_sli4_cq_get(struct lpfc_queue *q)
 {
 	struct lpfc_cqe *cqe;
+	uint32_t idx;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -327,11 +355,12 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
 		return NULL;
 	/* If the host has not yet processed the next entry then we are done */
-	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+	idx = ((q->hba_index + 1) % q->entry_count);
+	if (idx == q->host_index)
 		return NULL;
 
 	cqe = q->qe[q->hba_index].cqe;
-	q->hba_index = ((q->hba_index + 1) % q->entry_count);
+	q->hba_index = idx;
 	return cqe;
 }
 
@@ -472,8 +501,8 @@ lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
 static inline IOCB_t *
 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-	return (IOCB_t *) (((char *) pring->cmdringaddr) +
-			   pring->cmdidx * phba->iocb_cmd_size);
+	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
+			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
 }
 
 /**
@@ -489,8 +518,8 @@ lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 static inline IOCB_t *
 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-	return (IOCB_t *) (((char *) pring->rspringaddr) +
-			   pring->rspidx * phba->iocb_rsp_size);
+	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
+			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
 }
 
 /**
@@ -1320,21 +1349,23 @@ static IOCB_t *
 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
-	uint32_t  max_cmd_idx = pring->numCiocb;
-	if ((pring->next_cmdidx == pring->cmdidx) &&
-	   (++pring->next_cmdidx >= max_cmd_idx))
-		pring->next_cmdidx = 0;
+	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
+	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
+	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
+		pring->sli.sli3.next_cmdidx = 0;
 
-	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
+	if (unlikely(pring->sli.sli3.local_getidx ==
+		pring->sli.sli3.next_cmdidx)) {
 
-		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
 
-		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
+		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 					"0315 Ring %d issue: portCmdGet %d "
 					"is bigger than cmd ring %d\n",
 					pring->ringno,
-					pring->local_getidx, max_cmd_idx);
+					pring->sli.sli3.local_getidx,
+					max_cmd_idx);
 
 			phba->link_state = LPFC_HBA_ERROR;
 			/*
@@ -1349,7 +1380,7 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 			return NULL;
 		}
 
-		if (pring->local_getidx == pring->next_cmdidx)
+		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
 			return NULL;
 	}
 
@@ -1484,8 +1515,8 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 * Let the HBA know what IOCB slot will be the next one the
 	 * driver will put a command into.
 	 */
-	pring->cmdidx = pring->next_cmdidx;
-	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
+	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
+	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
 }
 
 /**
@@ -2056,6 +2087,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
 	case MBX_READ_EVENT_LOG:
 	case MBX_SECURITY_MGMT:
 	case MBX_AUTH_PORT:
+	case MBX_ACCESS_VDATA:
 		ret = mbxCommand;
 		break;
 	default:
@@ -2786,7 +2818,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 			"0312 Ring %d handler: portRspPut %d "
 			"is bigger than rsp ring %d\n",
 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
-			pring->numRiocb);
+			pring->sli.sli3.numRiocb);
 
 	phba->link_state = LPFC_HBA_ERROR;
 
@@ -2815,10 +2847,26 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 void lpfc_poll_eratt(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
-	uint32_t eratt = 0;
+	uint32_t eratt = 0, rem;
+	uint64_t sli_intr, cnt;
 
 	phba = (struct lpfc_hba *)ptr;
 
+	/* Here we will also keep track of interrupts per sec of the hba */
+	sli_intr = phba->sli.slistat.sli_intr;
+
+	if (phba->sli.slistat.sli_prev_intr > sli_intr)
+		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
+			sli_intr);
+	else
+		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
+
+	/* 64-bit integer division not supporte on 32-bit x86 - use do_div */
+	rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+	phba->sli.slistat.sli_ips = cnt;
+
+	phba->sli.slistat.sli_prev_intr = sli_intr;
+
 	/* Check chip HA register for error event */
 	eratt = lpfc_sli_check_eratt(phba);
 
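The interrupts-per-interval bookkeeping guards against the 64-bit counter wrapping and divides with do_div() because native 64-by-32 division is unavailable on 32-bit x86. A stand-alone sketch of the wrap-aware delta, mirroring the patch's formula:

#include <stdio.h>
#include <stdint.h>

static uint64_t delta(uint64_t prev, uint64_t cur)
{
	if (prev > cur)		/* counter wrapped since the last poll */
		return (UINT64_MAX - prev) + cur;
	return cur - prev;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)delta(100, 250));		  /* 150 */
	printf("%llu\n", (unsigned long long)delta(UINT64_MAX - 5, 10)); /* 15 */
	return 0;
}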
@@ -2873,7 +2921,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 	 * The next available response entry should never exceed the maximum
 	 * entries.  If it does, treat it as an adapter hardware error.
 	 */
-	portRspMax = pring->numRiocb;
+	portRspMax = pring->sli.sli3.numRiocb;
 	portRspPut = le32_to_cpu(pgp->rspPutInx);
 	if (unlikely(portRspPut >= portRspMax)) {
 		lpfc_sli_rsp_pointers_error(phba, pring);
@@ -2887,7 +2935,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 	phba->fcp_ring_in_use = 1;
 
 	rmb();
-	while (pring->rspidx != portRspPut) {
+	while (pring->sli.sli3.rspidx != portRspPut) {
 		/*
 		 * Fetch an entry off the ring and copy it into a local data
 		 * structure.  The copy involves a byte-swap since the
@@ -2896,8 +2944,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		entry = lpfc_resp_iocb(phba, pring);
 		phba->last_completion_time = jiffies;
 
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
+		if (++pring->sli.sli3.rspidx >= portRspMax)
+			pring->sli.sli3.rspidx = 0;
 
 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
 				      (uint32_t *) &rspiocbq.iocb,
@@ -2915,7 +2963,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 		 * queuedepths of the SCSI device.
 		 */
 		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+			IOERR_NO_RESOURCES)) {
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			phba->lpfc_rampdown_queue_depth(phba);
 			spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2998,9 +3047,10 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 	 * been updated, sync the pgp->rspPutInx and fetch the new port
 	 * response put pointer.
 	 */
-	writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+	writel(pring->sli.sli3.rspidx,
+	       &phba->host_gp[pring->ringno].rspGetInx);
 
-	if (pring->rspidx == portRspPut)
+	if (pring->sli.sli3.rspidx == portRspPut)
 		portRspPut = le32_to_cpu(pgp->rspPutInx);
 	}
 
3006 3056
@@ -3015,7 +3065,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3015 pring->stats.iocb_cmd_empty++; 3065 pring->stats.iocb_cmd_empty++;
3016 3066
3017 /* Force update of the local copy of cmdGetInx */ 3067 /* Force update of the local copy of cmdGetInx */
3018 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3068 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3019 lpfc_sli_resume_iocb(phba, pring); 3069 lpfc_sli_resume_iocb(phba, pring);
3020 3070
3021 if ((pring->lpfc_sli_cmd_available)) 3071 if ((pring->lpfc_sli_cmd_available))
@@ -3086,7 +3136,8 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	 * queuedepths of the SCSI device.
 	 */
 	if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-	    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+	    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+	     IOERR_NO_RESOURCES)) {
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		phba->lpfc_rampdown_queue_depth(phba);
 		spin_lock_irqsave(&phba->hbalock, iflag);
@@ -3247,7 +3298,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 	 * The next available response entry should never exceed the maximum
 	 * entries.  If it does, treat it as an adapter hardware error.
 	 */
-	portRspMax = pring->numRiocb;
+	portRspMax = pring->sli.sli3.numRiocb;
 	portRspPut = le32_to_cpu(pgp->rspPutInx);
 	if (portRspPut >= portRspMax) {
 		/*
@@ -3269,7 +3320,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 	}
 
 	rmb();
-	while (pring->rspidx != portRspPut) {
+	while (pring->sli.sli3.rspidx != portRspPut) {
 		/*
 		 * Build a completion list and call the appropriate handler.
 		 * The process is to get the next available response iocb, get
@@ -3297,8 +3348,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 				      phba->iocb_rsp_size);
 		irsp = &rspiocbp->iocb;
 
-		if (++pring->rspidx >= portRspMax)
-			pring->rspidx = 0;
+		if (++pring->sli.sli3.rspidx >= portRspMax)
+			pring->sli.sli3.rspidx = 0;
 
 		if (pring->ringno == LPFC_ELS_RING) {
 			lpfc_debugfs_slow_ring_trc(phba,
@@ -3308,7 +3359,8 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 				*(((uint32_t *) irsp) + 7));
 		}
 
-		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
+		writel(pring->sli.sli3.rspidx,
+		       &phba->host_gp[pring->ringno].rspGetInx);
 
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		/* Handle the response IOCB */
@@ -3320,10 +3372,10 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
 		 * response put pointer.
 		 */
-		if (pring->rspidx == portRspPut) {
+		if (pring->sli.sli3.rspidx == portRspPut) {
 			portRspPut = le32_to_cpu(pgp->rspPutInx);
 		}
-	} /* while (pring->rspidx != portRspPut) */
+	} /* while (pring->sli.sli3.rspidx != portRspPut) */
 
 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
 		/* At least one response entry has been freed */
@@ -3338,7 +3390,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
 	pring->stats.iocb_cmd_empty++;
 
 	/* Force update of the local copy of cmdGetInx */
-	pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
+	pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
 	lpfc_sli_resume_iocb(phba, pring);
 
 	if ((pring->lpfc_sli_cmd_available))
@@ -3859,10 +3911,10 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
 		pring->flag = 0;
-		pring->rspidx = 0;
-		pring->next_cmdidx  = 0;
-		pring->local_getidx = 0;
-		pring->cmdidx = 0;
+		pring->sli.sli3.rspidx = 0;
+		pring->sli.sli3.next_cmdidx  = 0;
+		pring->sli.sli3.local_getidx = 0;
+		pring->sli.sli3.cmdidx = 0;
 		pring->missbufcnt = 0;
 	}
 
@@ -4893,16 +4945,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4893 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4945 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4894 fcp_eqidx = 0; 4946 fcp_eqidx = 0;
4895 if (phba->sli4_hba.fcp_cq) { 4947 if (phba->sli4_hba.fcp_cq) {
4896 do 4948 do {
4897 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4949 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4898 LPFC_QUEUE_REARM); 4950 LPFC_QUEUE_REARM);
4899 while (++fcp_eqidx < phba->cfg_fcp_eq_count); 4951 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
4900 } 4952 }
4901 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4953 if (phba->sli4_hba.hba_eq) {
4902 if (phba->sli4_hba.fp_eq) { 4954 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
4903 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
4904 fcp_eqidx++) 4955 fcp_eqidx++)
4905 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4956 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
4906 LPFC_QUEUE_REARM); 4957 LPFC_QUEUE_REARM);
4907 } 4958 }
4908} 4959}
@@ -7784,14 +7835,18 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7784 * 7835 *
7785 * Return: index into SLI4 fast-path FCP queue index. 7836 * Return: index into SLI4 fast-path FCP queue index.
7786 **/ 7837 **/
7787static uint32_t 7838static inline uint32_t
7788lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 7839lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
7789{ 7840{
7790 ++phba->fcp_qidx; 7841 int i;
7791 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 7842
7792 phba->fcp_qidx = 0; 7843 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
7844 i = smp_processor_id();
7845 else
7846 i = atomic_add_return(1, &phba->fcp_qidx);
7793 7847
7794 return phba->fcp_qidx; 7848 i = (i % phba->cfg_fcp_io_channel);
7849 return i;
7795} 7850}
7796 7851
7797/** 7852/**
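
The rewritten lpfc_sli4_scmd_to_wqidx_distr() above supports two distribution policies: pin the WQE to the submitting CPU's channel (LPFC_FCP_SCHED_BY_CPU) or rotate an atomic counter round-robin; either way the raw index is folded modulo cfg_fcp_io_channel so it always lands on a valid channel. A minimal user-space sketch of the same logic (the type and field names here are illustrative, not the driver's):

        #include <stdatomic.h>

        enum sched_policy { SCHED_BY_CPU, SCHED_ROUND_ROBIN };

        struct hba_cfg {
                enum sched_policy policy;   /* cfg_fcp_io_sched analogue */
                int io_channels;            /* cfg_fcp_io_channel analogue */
                atomic_int qidx;            /* per-HBA rotating counter */
        };

        static int scmd_to_wqidx(struct hba_cfg *hba, int this_cpu)
        {
                int i;

                if (hba->policy == SCHED_BY_CPU)
                        i = this_cpu;               /* cache-warm channel */
                else                                /* new value, like atomic_add_return() */
                        i = atomic_fetch_add(&hba->qidx, 1) + 1;

                return i % hba->io_channels;        /* always in range */
        }
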
@@ -8311,16 +8366,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8311 8366
8312 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8367 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8313 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8368 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8314 /*
8315 * For FCP command IOCB, get a new WQ index to distribute
8316 * WQE across the WQs. On the other hand, for abort IOCB,
8317 * it carries the same WQ index to the original command
8318 * IOCB.
8319 */
8320 if (piocb->iocb_flag & LPFC_IO_FCP)
8321 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8322 if (unlikely(!phba->sli4_hba.fcp_wq))
8323 return IOCB_ERROR;
8324 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 8369 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8325 &wqe)) 8370 &wqe))
8326 return IOCB_ERROR; 8371 return IOCB_ERROR;
@@ -8401,13 +8446,68 @@ int
8401lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8446lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8402 struct lpfc_iocbq *piocb, uint32_t flag) 8447 struct lpfc_iocbq *piocb, uint32_t flag)
8403{ 8448{
8449 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
8450 struct lpfc_sli_ring *pring;
8451 struct lpfc_queue *fpeq;
8452 struct lpfc_eqe *eqe;
8404 unsigned long iflags; 8453 unsigned long iflags;
8405 int rc; 8454 int rc, idx;
8406 8455
8407 spin_lock_irqsave(&phba->hbalock, iflags); 8456 if (phba->sli_rev == LPFC_SLI_REV4) {
8408 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8457 if (piocb->iocb_flag & LPFC_IO_FCP) {
8409 spin_unlock_irqrestore(&phba->hbalock, iflags); 8458 if (unlikely(!phba->sli4_hba.fcp_wq))
8459 return IOCB_ERROR;
8460 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8461 piocb->fcp_wqidx = idx;
8462 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8463
8464 pring = &phba->sli.ring[ring_number];
8465 spin_lock_irqsave(&pring->ring_lock, iflags);
8466 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8467 flag);
8468 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8469
8470 if (lpfc_fcp_look_ahead) {
8471 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
8472
8473 if (atomic_dec_and_test(&fcp_eq_hdl->
8474 fcp_eq_in_use)) {
8410 8475
8476 /* Get associated EQ with this index */
8477 fpeq = phba->sli4_hba.hba_eq[idx];
8478
8479 /* Turn off interrupts from this EQ */
8480 lpfc_sli4_eq_clr_intr(fpeq);
8481
8482 /*
8483 * Process all the events on FCP EQ
8484 */
8485 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
8486 lpfc_sli4_hba_handle_eqe(phba,
8487 eqe, idx);
8488 fpeq->EQ_processed++;
8489 }
8490
8491 /* Always clear and re-arm the EQ */
8492 lpfc_sli4_eq_release(fpeq,
8493 LPFC_QUEUE_REARM);
8494 }
8495 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
8496 }
8497 } else {
8498 pring = &phba->sli.ring[ring_number];
8499 spin_lock_irqsave(&pring->ring_lock, iflags);
8500 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
8501 flag);
8502 spin_unlock_irqrestore(&pring->ring_lock, iflags);
8503
8504 }
8505 } else {
8506 /* For now, SLI2/3 will still use hbalock */
8507 spin_lock_irqsave(&phba->hbalock, iflags);
8508 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8509 spin_unlock_irqrestore(&phba->hbalock, iflags);
8510 }
8411 return rc; 8511 return rc;
8412} 8512}
8413 8513
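
Taken together, the rewrite above splits submission into three paths: SLI4 FCP commands compute a channel, remap onto a per-channel ring at MAX_SLI3_CONFIGURED_RINGS + idx, and serialize only on that ring's ring_lock; SLI4 non-FCP traffic takes its own ring's lock; SLI2/3 keeps the coarse hbalock. A condensed sketch of just the lock selection (__issue and pick_channel are stand-in names, not driver functions):

        int issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
                       struct lpfc_iocbq *piocb, uint32_t flag)
        {
                struct lpfc_sli_ring *pring;
                unsigned long iflags;
                int rc;

                if (phba->sli_rev == LPFC_SLI_REV4) {
                        if (piocb->iocb_flag & LPFC_IO_FCP)
                                ring_number = MAX_SLI3_CONFIGURED_RINGS +
                                              pick_channel(phba); /* per-channel ring */
                        pring = &phba->sli.ring[ring_number];
                        spin_lock_irqsave(&pring->ring_lock, iflags); /* fine-grained */
                        rc = __issue(phba, ring_number, piocb, flag);
                        spin_unlock_irqrestore(&pring->ring_lock, iflags);
                } else {
                        spin_lock_irqsave(&phba->hbalock, iflags); /* legacy coarse lock */
                        rc = __issue(phba, ring_number, piocb, flag);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                }
                return rc;
        }
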
@@ -8434,18 +8534,18 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
8434 8534
8435 /* Take some away from the FCP ring */ 8535 /* Take some away from the FCP ring */
8436 pring = &psli->ring[psli->fcp_ring]; 8536 pring = &psli->ring[psli->fcp_ring];
8437 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8537 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8438 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8538 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8439 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8539 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8440 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8540 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8441 8541
8442 /* and give them to the extra ring */ 8542 /* and give them to the extra ring */
8443 pring = &psli->ring[psli->extra_ring]; 8543 pring = &psli->ring[psli->extra_ring];
8444 8544
8445 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8545 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8446 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8546 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8447 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8547 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8448 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8548 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8449 8549
8450 /* Setup default profile for this ring */ 8550 /* Setup default profile for this ring */
8451 pring->iotag_max = 4096; 8551 pring->iotag_max = 4096;
@@ -8457,56 +8557,6 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
8457 return 0; 8557 return 0;
8458} 8558}
8459 8559
8460/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
8461 * @vport: pointer to virtual port object.
8462 * @ndlp: nodelist pointer for the impacted rport.
8463 *
8464 * The driver calls this routine in response to a XRI ABORT CQE
8465 * event from the port. In this event, the driver is required to
8466 * recover its login to the rport even though its login may be valid
8467 * from the driver's perspective. The failed ABTS notice from the
8468 * port indicates the rport is not responding.
8469 */
8470static void
8471lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
8472 struct lpfc_nodelist *ndlp)
8473{
8474 struct Scsi_Host *shost;
8475 struct lpfc_hba *phba;
8476 unsigned long flags = 0;
8477
8478 shost = lpfc_shost_from_vport(vport);
8479 phba = vport->phba;
8480 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
8481 lpfc_printf_log(phba, KERN_INFO,
8482 LOG_SLI, "3093 No rport recovery needed. "
8483 "rport in state 0x%x\n",
8484 ndlp->nlp_state);
8485 return;
8486 }
8487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8488 "3094 Start rport recovery on shost id 0x%x "
8489 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
8490 "flags 0x%x\n",
8491 shost->host_no, ndlp->nlp_DID,
8492 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
8493 ndlp->nlp_flag);
8494 /*
8495 * The rport is not responding. Don't attempt ADISC recovery.
8496 * Remove the FCP-2 flag to force a PLOGI.
8497 */
8498 spin_lock_irqsave(shost->host_lock, flags);
8499 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
8500 spin_unlock_irqrestore(shost->host_lock, flags);
8501 lpfc_disc_state_machine(vport, ndlp, NULL,
8502 NLP_EVT_DEVICE_RECOVERY);
8503 lpfc_cancel_retry_delay_tmo(vport, ndlp);
8504 spin_lock_irqsave(shost->host_lock, flags);
8505 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
8506 spin_unlock_irqrestore(shost->host_lock, flags);
8507 lpfc_disc_start(vport);
8508}
8509
8510/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8560/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
8511 * @phba: Pointer to HBA context object. 8561 * @phba: Pointer to HBA context object.
8512 * @iocbq: Pointer to iocb object. 8562 * @iocbq: Pointer to iocb object.
@@ -8594,7 +8644,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8594 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 8644 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8595 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 8645 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8596 */ 8646 */
8597 ext_status = axri->parameter & WCQE_PARAM_MASK; 8647 ext_status = axri->parameter & IOERR_PARAM_MASK;
8598 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 8648 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8599 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 8649 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
8600 lpfc_sli_abts_recover_port(vport, ndlp); 8650 lpfc_sli_abts_recover_port(vport, ndlp);
@@ -8692,7 +8742,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8692 struct lpfc_sli *psli = &phba->sli; 8742 struct lpfc_sli *psli = &phba->sli;
8693 struct lpfc_sli_ring *pring; 8743 struct lpfc_sli_ring *pring;
8694 8744
8695 psli->num_rings = MAX_CONFIGURED_RINGS; 8745 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
8746 if (phba->sli_rev == LPFC_SLI_REV4)
8747 psli->num_rings += phba->cfg_fcp_io_channel;
8696 psli->sli_flag = 0; 8748 psli->sli_flag = 0;
8697 psli->fcp_ring = LPFC_FCP_RING; 8749 psli->fcp_ring = LPFC_FCP_RING;
8698 psli->next_ring = LPFC_FCP_NEXT_RING; 8750 psli->next_ring = LPFC_FCP_NEXT_RING;
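
With SLI4 each FCP work queue now gets its own ring, so the ring count is no longer a compile-time constant. The arithmetic the two added lines implement:

        int num_rings(int sli_rev, int fcp_io_channels)
        {
                int n = MAX_SLI3_CONFIGURED_RINGS;      /* 4 base SLI3 rings */
                if (sli_rev == LPFC_SLI_REV4)
                        n += fcp_io_channels;           /* one ring per FCP WQ */
                return n;                               /* e.g. 4 + 8 = 12 rings */
        }
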
@@ -8707,16 +8759,20 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8707 switch (i) { 8759 switch (i) {
8708 case LPFC_FCP_RING: /* ring 0 - FCP */ 8760 case LPFC_FCP_RING: /* ring 0 - FCP */
8709 /* numCiocb and numRiocb are used in config_port */ 8761 /* numCiocb and numRiocb are used in config_port */
8710 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 8762 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8711 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 8763 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8712 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8764 pring->sli.sli3.numCiocb +=
8713 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8765 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8714 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8766 pring->sli.sli3.numRiocb +=
8715 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8767 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8716 pring->sizeCiocb = (phba->sli_rev == 3) ? 8768 pring->sli.sli3.numCiocb +=
8769 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8770 pring->sli.sli3.numRiocb +=
8771 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8772 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8717 SLI3_IOCB_CMD_SIZE : 8773 SLI3_IOCB_CMD_SIZE :
8718 SLI2_IOCB_CMD_SIZE; 8774 SLI2_IOCB_CMD_SIZE;
8719 pring->sizeRiocb = (phba->sli_rev == 3) ? 8775 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8720 SLI3_IOCB_RSP_SIZE : 8776 SLI3_IOCB_RSP_SIZE :
8721 SLI2_IOCB_RSP_SIZE; 8777 SLI2_IOCB_RSP_SIZE;
8722 pring->iotag_ctr = 0; 8778 pring->iotag_ctr = 0;
@@ -8727,12 +8783,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8727 break; 8783 break;
8728 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 8784 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
8729 /* numCiocb and numRiocb are used in config_port */ 8785 /* numCiocb and numRiocb are used in config_port */
8730 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 8786 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8731 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 8787 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
8732 pring->sizeCiocb = (phba->sli_rev == 3) ? 8788 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8733 SLI3_IOCB_CMD_SIZE : 8789 SLI3_IOCB_CMD_SIZE :
8734 SLI2_IOCB_CMD_SIZE; 8790 SLI2_IOCB_CMD_SIZE;
8735 pring->sizeRiocb = (phba->sli_rev == 3) ? 8791 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8736 SLI3_IOCB_RSP_SIZE : 8792 SLI3_IOCB_RSP_SIZE :
8737 SLI2_IOCB_RSP_SIZE; 8793 SLI2_IOCB_RSP_SIZE;
8738 pring->iotag_max = phba->cfg_hba_queue_depth; 8794 pring->iotag_max = phba->cfg_hba_queue_depth;
@@ -8740,12 +8796,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8740 break; 8796 break;
8741 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 8797 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8742 /* numCiocb and numRiocb are used in config_port */ 8798 /* numCiocb and numRiocb are used in config_port */
8743 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 8799 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8744 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 8800 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
8745 pring->sizeCiocb = (phba->sli_rev == 3) ? 8801 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
8746 SLI3_IOCB_CMD_SIZE : 8802 SLI3_IOCB_CMD_SIZE :
8747 SLI2_IOCB_CMD_SIZE; 8803 SLI2_IOCB_CMD_SIZE;
8748 pring->sizeRiocb = (phba->sli_rev == 3) ? 8804 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
8749 SLI3_IOCB_RSP_SIZE : 8805 SLI3_IOCB_RSP_SIZE :
8750 SLI2_IOCB_RSP_SIZE; 8806 SLI2_IOCB_RSP_SIZE;
8751 pring->fast_iotag = 0; 8807 pring->fast_iotag = 0;
@@ -8786,8 +8842,9 @@ lpfc_sli_setup(struct lpfc_hba *phba)
8786 lpfc_sli4_ct_abort_unsol_event; 8842 lpfc_sli4_ct_abort_unsol_event;
8787 break; 8843 break;
8788 } 8844 }
8789 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 8845 totiocbsize += (pring->sli.sli3.numCiocb *
8790 (pring->numRiocb * pring->sizeRiocb); 8846 pring->sli.sli3.sizeCiocb) +
8847 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
8791 } 8848 }
8792 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 8849 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
8793 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 8850 /* Too many cmd / rsp ring entries in SLI2 SLIM */
@@ -8828,14 +8885,15 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
8828 for (i = 0; i < psli->num_rings; i++) { 8885 for (i = 0; i < psli->num_rings; i++) {
8829 pring = &psli->ring[i]; 8886 pring = &psli->ring[i];
8830 pring->ringno = i; 8887 pring->ringno = i;
8831 pring->next_cmdidx = 0; 8888 pring->sli.sli3.next_cmdidx = 0;
8832 pring->local_getidx = 0; 8889 pring->sli.sli3.local_getidx = 0;
8833 pring->cmdidx = 0; 8890 pring->sli.sli3.cmdidx = 0;
8834 INIT_LIST_HEAD(&pring->txq); 8891 INIT_LIST_HEAD(&pring->txq);
8835 INIT_LIST_HEAD(&pring->txcmplq); 8892 INIT_LIST_HEAD(&pring->txcmplq);
8836 INIT_LIST_HEAD(&pring->iocb_continueq); 8893 INIT_LIST_HEAD(&pring->iocb_continueq);
8837 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 8894 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
8838 INIT_LIST_HEAD(&pring->postbufq); 8895 INIT_LIST_HEAD(&pring->postbufq);
8896 spin_lock_init(&pring->ring_lock);
8839 } 8897 }
8840 spin_unlock_irq(&phba->hbalock); 8898 spin_unlock_irq(&phba->hbalock);
8841 return 1; 8899 return 1;
@@ -9334,6 +9392,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9334 IOCB_t *icmd = NULL; 9392 IOCB_t *icmd = NULL;
9335 IOCB_t *iabt = NULL; 9393 IOCB_t *iabt = NULL;
9336 int retval; 9394 int retval;
9395 unsigned long iflags;
9337 9396
9338 /* 9397 /*
9339 * There are certain command types we don't want to abort. And we 9398 * There are certain command types we don't want to abort. And we
@@ -9386,7 +9445,17 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9386 iabt->un.acxri.abortIoTag, 9445 iabt->un.acxri.abortIoTag,
9387 iabt->un.acxri.abortContextTag, 9446 iabt->un.acxri.abortContextTag,
9388 abtsiocbp->iotag); 9447 abtsiocbp->iotag);
9389 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 9448
9449 if (phba->sli_rev == LPFC_SLI_REV4) {
9450 /* Note: both hbalock and ring_lock need to be set here */
9451 spin_lock_irqsave(&pring->ring_lock, iflags);
9452 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9453 abtsiocbp, 0);
9454 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9455 } else {
9456 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9457 abtsiocbp, 0);
9458 }
9390 9459
9391 if (retval) 9460 if (retval)
9392 __lpfc_sli_release_iocbq(phba, abtsiocbp); 9461 __lpfc_sli_release_iocbq(phba, abtsiocbp);
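
The comment above pins down the lock order: the caller already holds hbalock, and the SLI4 branch nests ring_lock inside it. Keeping one global order (hbalock outer, ring_lock inner) everywhere is what prevents an AB/BA deadlock against the submit path. The nesting, with the caller's context assumed:

        /* caller holds phba->hbalock */
        spin_lock_irqsave(&pring->ring_lock, iflags);   /* inner lock, SLI4 only */
        retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
        spin_unlock_irqrestore(&pring->ring_lock, iflags);
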
@@ -10947,12 +11016,12 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
10947 unsigned long iflags; 11016 unsigned long iflags;
10948 11017
10949 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 11018 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
10950 spin_lock_irqsave(&phba->hbalock, iflags); 11019 spin_lock_irqsave(&pring->ring_lock, iflags);
10951 pring->stats.iocb_event++; 11020 pring->stats.iocb_event++;
10952 /* Look up the ELS command IOCB and create pseudo response IOCB */ 11021 /* Look up the ELS command IOCB and create pseudo response IOCB */
10953 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11022 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
10954 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11023 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10955 spin_unlock_irqrestore(&phba->hbalock, iflags); 11024 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10956 11025
10957 if (unlikely(!cmdiocbq)) { 11026 if (unlikely(!cmdiocbq)) {
10958 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11154,6 +11223,7 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11154/** 11223/**
11155 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 11224 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
11156 * @phba: Pointer to HBA context object. 11225 * @phba: Pointer to HBA context object.
11226 * @cq: Pointer to associated CQ
11157 * @wcqe: Pointer to work-queue completion queue entry. 11227 * @wcqe: Pointer to work-queue completion queue entry.
11158 * 11228 *
11159 * This routine handles an ELS work-queue completion event. 11229 * This routine handles an ELS work-queue completion event.
@@ -11161,12 +11231,12 @@ lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11161 * Return: true if work posted to worker thread, otherwise false. 11231 * Return: true if work posted to worker thread, otherwise false.
11162 **/ 11232 **/
11163static bool 11233static bool
11164lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 11234lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11165 struct lpfc_wcqe_complete *wcqe) 11235 struct lpfc_wcqe_complete *wcqe)
11166{ 11236{
11167 struct lpfc_iocbq *irspiocbq; 11237 struct lpfc_iocbq *irspiocbq;
11168 unsigned long iflags; 11238 unsigned long iflags;
11169 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11239 struct lpfc_sli_ring *pring = cq->pring;
11170 11240
11171 /* Get an irspiocbq for later ELS response processing use */ 11241 /* Get an irspiocbq for later ELS response processing use */
11172 irspiocbq = lpfc_sli_get_iocbq(phba); 11242 irspiocbq = lpfc_sli_get_iocbq(phba);
@@ -11311,14 +11381,17 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11311 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11381 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
11312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11382 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11313 "2537 Receive Frame Truncated!!\n"); 11383 "2537 Receive Frame Truncated!!\n");
11384 hrq->RQ_buf_trunc++;
11314 case FC_STATUS_RQ_SUCCESS: 11385 case FC_STATUS_RQ_SUCCESS:
11315 lpfc_sli4_rq_release(hrq, drq); 11386 lpfc_sli4_rq_release(hrq, drq);
11316 spin_lock_irqsave(&phba->hbalock, iflags); 11387 spin_lock_irqsave(&phba->hbalock, iflags);
11317 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11388 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
11318 if (!dma_buf) { 11389 if (!dma_buf) {
11390 hrq->RQ_no_buf_found++;
11319 spin_unlock_irqrestore(&phba->hbalock, iflags); 11391 spin_unlock_irqrestore(&phba->hbalock, iflags);
11320 goto out; 11392 goto out;
11321 } 11393 }
11394 hrq->RQ_rcv_buf++;
11322 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11395 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
11323 /* save off the frame for the worker thread to process */ 11396 /* save off the frame for the worker thread to process */
11324 list_add_tail(&dma_buf->cq_event.list, 11397 list_add_tail(&dma_buf->cq_event.list,
@@ -11330,6 +11403,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
11330 break; 11403 break;
11331 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11404 case FC_STATUS_INSUFF_BUF_NEED_BUF:
11332 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11405 case FC_STATUS_INSUFF_BUF_FRM_DISC:
11406 hrq->RQ_no_posted_buf++;
11333 /* Post more buffers if possible */ 11407 /* Post more buffers if possible */
11334 spin_lock_irqsave(&phba->hbalock, iflags); 11408 spin_lock_irqsave(&phba->hbalock, iflags);
11335 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11409 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
@@ -11367,7 +11441,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11367 case CQE_CODE_COMPL_WQE: 11441 case CQE_CODE_COMPL_WQE:
11368 /* Process the WQ/RQ complete event */ 11442 /* Process the WQ/RQ complete event */
11369 phba->last_completion_time = jiffies; 11443 phba->last_completion_time = jiffies;
11370 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 11444 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
11371 (struct lpfc_wcqe_complete *)&cqevt); 11445 (struct lpfc_wcqe_complete *)&cqevt);
11372 break; 11446 break;
11373 case CQE_CODE_RELEASE_WQE: 11447 case CQE_CODE_RELEASE_WQE:
@@ -11411,31 +11485,18 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11411 * 11485 *
11412 **/ 11486 **/
11413static void 11487static void
11414lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 11488lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11489 struct lpfc_queue *speq)
11415{ 11490{
11416 struct lpfc_queue *cq = NULL, *childq, *speq; 11491 struct lpfc_queue *cq = NULL, *childq;
11417 struct lpfc_cqe *cqe; 11492 struct lpfc_cqe *cqe;
11418 bool workposted = false; 11493 bool workposted = false;
11419 int ecount = 0; 11494 int ecount = 0;
11420 uint16_t cqid; 11495 uint16_t cqid;
11421 11496
11422 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
11423 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11424 "0359 Not a valid slow-path completion "
11425 "event: majorcode=x%x, minorcode=x%x\n",
11426 bf_get_le32(lpfc_eqe_major_code, eqe),
11427 bf_get_le32(lpfc_eqe_minor_code, eqe));
11428 return;
11429 }
11430
11431 /* Get the reference to the corresponding CQ */ 11497 /* Get the reference to the corresponding CQ */
11432 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 11498 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11433 11499
11434 /* Search for completion queue pointer matching this cqid */
11435 speq = phba->sli4_hba.sp_eq;
11436 /* sanity check on queue memory */
11437 if (unlikely(!speq))
11438 return;
11439 list_for_each_entry(childq, &speq->child_list, list) { 11500 list_for_each_entry(childq, &speq->child_list, list) {
11440 if (childq->queue_id == cqid) { 11501 if (childq->queue_id == cqid) {
11441 cq = childq; 11502 cq = childq;
@@ -11457,6 +11518,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11457 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11518 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
11458 if (!(++ecount % cq->entry_repost)) 11519 if (!(++ecount % cq->entry_repost))
11459 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11520 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11521 cq->CQ_mbox++;
11460 } 11522 }
11461 break; 11523 break;
11462 case LPFC_WCQ: 11524 case LPFC_WCQ:
@@ -11470,6 +11532,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11470 if (!(++ecount % cq->entry_repost)) 11532 if (!(++ecount % cq->entry_repost))
11471 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11533 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11472 } 11534 }
11535
11536 /* Track the max number of CQEs processed in 1 EQ */
11537 if (ecount > cq->CQ_max_cqe)
11538 cq->CQ_max_cqe = ecount;
11473 break; 11539 break;
11474 default: 11540 default:
11475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11541 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
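
Both CQ loops above follow the same entry_repost pattern: consumed CQEs are handed back to the hardware in batches without re-arming, and the queue is re-armed only after the sweep; the new CQ_max_cqe bookkeeping records the busiest sweep. The generic shape of the loop, with hypothetical helper names:

        int ecount = 0;
        while ((cqe = cq_get(cq)) != NULL) {
                handle(cqe);
                if (!(++ecount % cq->entry_repost))  /* batch boundary */
                        cq_release(cq, NOARM);       /* credit HW, stay quiet */
        }
        cq_release(cq, REARM);                       /* final: re-enable interrupt */
        if (ecount > cq->CQ_max_cqe)                 /* stats high-water mark */
                cq->CQ_max_cqe = ecount;
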
@@ -11494,34 +11560,33 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
11494 11560
11495/** 11561/**
11496 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 11562 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11497 * @eqe: Pointer to fast-path completion queue entry. 11563 * @phba: Pointer to HBA context object.
11564 * @cq: Pointer to associated CQ
11565 * @wcqe: Pointer to work-queue completion queue entry.
11498 * 11566 *
11499 * This routine processes a fast-path work queue completion entry from fast-path 11567 * This routine processes a fast-path work queue completion entry from fast-path
11500 * event queue for FCP command response completion. 11568 * event queue for FCP command response completion.
11501 **/ 11569 **/
11502static void 11570static void
11503lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 11571lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11504 struct lpfc_wcqe_complete *wcqe) 11572 struct lpfc_wcqe_complete *wcqe)
11505{ 11573{
11506 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 11574 struct lpfc_sli_ring *pring = cq->pring;
11507 struct lpfc_iocbq *cmdiocbq; 11575 struct lpfc_iocbq *cmdiocbq;
11508 struct lpfc_iocbq irspiocbq; 11576 struct lpfc_iocbq irspiocbq;
11509 unsigned long iflags; 11577 unsigned long iflags;
11510 11578
11511 spin_lock_irqsave(&phba->hbalock, iflags);
11512 pring->stats.iocb_event++;
11513 spin_unlock_irqrestore(&phba->hbalock, iflags);
11514
11515 /* Check for response status */ 11579 /* Check for response status */
11516 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 11580 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11517 /* If resource errors reported from HBA, reduce queue 11581 /* If resource errors reported from HBA, reduce queue
11518 * depth of the SCSI device. 11582 * depth of the SCSI device.
11519 */ 11583 */
11520 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 11584 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
11521 IOSTAT_LOCAL_REJECT) && 11585 IOSTAT_LOCAL_REJECT)) &&
11522 (wcqe->parameter == IOERR_NO_RESOURCES)) { 11586 ((wcqe->parameter & IOERR_PARAM_MASK) ==
11587 IOERR_NO_RESOURCES))
11523 phba->lpfc_rampdown_queue_depth(phba); 11588 phba->lpfc_rampdown_queue_depth(phba);
11524 } 11589
11525 /* Log the error status */ 11590 /* Log the error status */
11526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11591 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11527 "0373 FCP complete error: status=x%x, " 11592 "0373 FCP complete error: status=x%x, "
@@ -11534,10 +11599,11 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
11534 } 11599 }
11535 11600
11536 /* Look up the FCP command IOCB and create pseudo response IOCB */ 11601 /* Look up the FCP command IOCB and create pseudo response IOCB */
11537 spin_lock_irqsave(&phba->hbalock, iflags); 11602 spin_lock_irqsave(&pring->ring_lock, iflags);
11603 pring->stats.iocb_event++;
11538 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11604 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11539 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11605 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11540 spin_unlock_irqrestore(&phba->hbalock, iflags); 11606 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11541 if (unlikely(!cmdiocbq)) { 11607 if (unlikely(!cmdiocbq)) {
11542 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11608 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11543 "0374 FCP complete with no corresponding " 11609 "0374 FCP complete with no corresponding "
@@ -11621,17 +11687,20 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11621 /* Check and process for different type of WCQE and dispatch */ 11687 /* Check and process for different type of WCQE and dispatch */
11622 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 11688 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11623 case CQE_CODE_COMPL_WQE: 11689 case CQE_CODE_COMPL_WQE:
11690 cq->CQ_wq++;
11624 /* Process the WQ complete event */ 11691 /* Process the WQ complete event */
11625 phba->last_completion_time = jiffies; 11692 phba->last_completion_time = jiffies;
11626 lpfc_sli4_fp_handle_fcp_wcqe(phba, 11693 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
11627 (struct lpfc_wcqe_complete *)&wcqe); 11694 (struct lpfc_wcqe_complete *)&wcqe);
11628 break; 11695 break;
11629 case CQE_CODE_RELEASE_WQE: 11696 case CQE_CODE_RELEASE_WQE:
11697 cq->CQ_release_wqe++;
11630 /* Process the WQ release event */ 11698 /* Process the WQ release event */
11631 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 11699 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11632 (struct lpfc_wcqe_release *)&wcqe); 11700 (struct lpfc_wcqe_release *)&wcqe);
11633 break; 11701 break;
11634 case CQE_CODE_XRI_ABORTED: 11702 case CQE_CODE_XRI_ABORTED:
11703 cq->CQ_xri_aborted++;
11635 /* Process the WQ XRI abort event */ 11704 /* Process the WQ XRI abort event */
11636 phba->last_completion_time = jiffies; 11705 phba->last_completion_time = jiffies;
11637 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 11706 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
@@ -11647,7 +11716,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11647} 11716}
11648 11717
11649/** 11718/**
11650 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 11719 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
11651 * @phba: Pointer to HBA context object. 11720 * @phba: Pointer to HBA context object.
11652 * @eqe: Pointer to fast-path event queue entry. 11721 * @eqe: Pointer to fast-path event queue entry.
11653 * 11722 *
@@ -11659,8 +11728,8 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11659 * completion queue, and then return. 11728 * completion queue, and then return.
11660 **/ 11729 **/
11661static void 11730static void
11662lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 11731lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11663 uint32_t fcp_cqidx) 11732 uint32_t qidx)
11664{ 11733{
11665 struct lpfc_queue *cq; 11734 struct lpfc_queue *cq;
11666 struct lpfc_cqe *cqe; 11735 struct lpfc_cqe *cqe;
@@ -11670,30 +11739,38 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11670 11739
11671 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 11740 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
11672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11741 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11673 "0366 Not a valid fast-path completion " 11742 "0366 Not a valid completion "
11674 "event: majorcode=x%x, minorcode=x%x\n", 11743 "event: majorcode=x%x, minorcode=x%x\n",
11675 bf_get_le32(lpfc_eqe_major_code, eqe), 11744 bf_get_le32(lpfc_eqe_major_code, eqe),
11676 bf_get_le32(lpfc_eqe_minor_code, eqe)); 11745 bf_get_le32(lpfc_eqe_minor_code, eqe));
11677 return; 11746 return;
11678 } 11747 }
11679 11748
11749 /* Get the reference to the corresponding CQ */
11750 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11751
11752 /* Check if this is a Slow path event */
11753 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
11754 lpfc_sli4_sp_handle_eqe(phba, eqe,
11755 phba->sli4_hba.hba_eq[qidx]);
11756 return;
11757 }
11758
11680 if (unlikely(!phba->sli4_hba.fcp_cq)) { 11759 if (unlikely(!phba->sli4_hba.fcp_cq)) {
11681 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11760 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11682 "3146 Fast-path completion queues " 11761 "3146 Fast-path completion queues "
11683 "does not exist\n"); 11762 "does not exist\n");
11684 return; 11763 return;
11685 } 11764 }
11686 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 11765 cq = phba->sli4_hba.fcp_cq[qidx];
11687 if (unlikely(!cq)) { 11766 if (unlikely(!cq)) {
11688 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 11767 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11689 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11690 "0367 Fast-path completion queue " 11769 "0367 Fast-path completion queue "
11691 "(%d) does not exist\n", fcp_cqidx); 11770 "(%d) does not exist\n", qidx);
11692 return; 11771 return;
11693 } 11772 }
11694 11773
11695 /* Get the reference to the corresponding CQ */
11696 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
11697 if (unlikely(cqid != cq->queue_id)) { 11774 if (unlikely(cqid != cq->queue_id)) {
11698 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11775 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11699 "0368 Miss-matched fast-path completion " 11776 "0368 Miss-matched fast-path completion "
@@ -11709,6 +11786,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11709 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11786 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11710 } 11787 }
11711 11788
11789 /* Track the max number of CQEs processed in 1 EQ */
11790 if (ecount > cq->CQ_max_cqe)
11791 cq->CQ_max_cqe = ecount;
11792
11712 /* Catch the no cq entry condition */ 11793 /* Catch the no cq entry condition */
11713 if (unlikely(ecount == 0)) 11794 if (unlikely(ecount == 0))
11714 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11795 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
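
The reorganized lpfc_sli4_hba_handle_eqe() makes every per-channel EQ self-routing: the CQ id carried in the EQE is compared against fcp_cq_map[qidx], and anything that is not this channel's FCP CQ is forwarded to the slow-path handler. The dispatch, condensed from the hunk above:

        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
        if (cqid != phba->sli4_hba.fcp_cq_map[qidx]) {
                /* ELS/mailbox/receive completions: slow path */
                lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
                return;
        }
        /* hot path: this channel's FCP completion queue */
        cq = phba->sli4_hba.fcp_cq[qidx];
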
@@ -11737,86 +11818,7 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11737} 11818}
11738 11819
11739/** 11820/**
11740 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 11821 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
11741 * @irq: Interrupt number.
11742 * @dev_id: The device context pointer.
11743 *
11744 * This function is directly called from the PCI layer as an interrupt
11745 * service routine when device with SLI-4 interface spec is enabled with
11746 * MSI-X multi-message interrupt mode and there are slow-path events in
11747 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11748 * interrupt mode, this function is called as part of the device-level
11749 * interrupt handler. When the PCI slot is in error recovery or the HBA is
11750 * undergoing initialization, the interrupt handler will not process the
11751 * interrupt. The link attention and ELS ring attention events are handled
11752 * by the worker thread. The interrupt handler signals the worker thread
11753 * and returns for these events. This function is called without any lock
11754 * held. It gets the hbalock to access and update SLI data structures.
11755 *
11756 * This function returns IRQ_HANDLED when interrupt is handled else it
11757 * returns IRQ_NONE.
11758 **/
11759irqreturn_t
11760lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11761{
11762 struct lpfc_hba *phba;
11763 struct lpfc_queue *speq;
11764 struct lpfc_eqe *eqe;
11765 unsigned long iflag;
11766 int ecount = 0;
11767
11768 /*
11769 * Get the driver's phba structure from the dev_id
11770 */
11771 phba = (struct lpfc_hba *)dev_id;
11772
11773 if (unlikely(!phba))
11774 return IRQ_NONE;
11775
11776 /* Get to the EQ struct associated with this vector */
11777 speq = phba->sli4_hba.sp_eq;
11778 if (unlikely(!speq))
11779 return IRQ_NONE;
11780
11781 /* Check device state for handling interrupt */
11782 if (unlikely(lpfc_intr_state_check(phba))) {
11783 /* Check again for link_state with lock held */
11784 spin_lock_irqsave(&phba->hbalock, iflag);
11785 if (phba->link_state < LPFC_LINK_DOWN)
11786 /* Flush, clear interrupt, and rearm the EQ */
11787 lpfc_sli4_eq_flush(phba, speq);
11788 spin_unlock_irqrestore(&phba->hbalock, iflag);
11789 return IRQ_NONE;
11790 }
11791
11792 /*
11793 * Process all the events on FCP slow-path EQ
11794 */
11795 while ((eqe = lpfc_sli4_eq_get(speq))) {
11796 lpfc_sli4_sp_handle_eqe(phba, eqe);
11797 if (!(++ecount % speq->entry_repost))
11798 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11799 }
11800
11801 /* Always clear and re-arm the slow-path EQ */
11802 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11803
11804 /* Catch the no cq entry condition */
11805 if (unlikely(ecount == 0)) {
11806 if (phba->intr_type == MSIX)
11807 /* MSI-X treated interrupt served as no EQ share INT */
11808 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11809 "0357 MSI-X interrupt with no EQE\n");
11810 else
11811 /* Non MSI-X treated on interrupt as EQ share INT */
11812 return IRQ_NONE;
11813 }
11814
11815 return IRQ_HANDLED;
11816} /* lpfc_sli4_sp_intr_handler */
11817
11818/**
11819 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11820 * @irq: Interrupt number. 11822 * @irq: Interrupt number.
11821 * @dev_id: The device context pointer. 11823 * @dev_id: The device context pointer.
11822 * 11824 *
@@ -11833,11 +11835,16 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11833 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 11835 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
11834 * equal to that of FCP CQ index. 11836 * equal to that of FCP CQ index.
11835 * 11837 *
11838 * The link attention and ELS ring attention events are handled
11839 * by the worker thread. The interrupt handler signals the worker thread
11840 * and returns for these events. This function is called without any lock
11841 * held. It gets the hbalock to access and update SLI data structures.
11842 *
11836 * This function returns IRQ_HANDLED when interrupt is handled else it 11843 * This function returns IRQ_HANDLED when interrupt is handled else it
11837 * returns IRQ_NONE. 11844 * returns IRQ_NONE.
11838 **/ 11845 **/
11839irqreturn_t 11846irqreturn_t
11840lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 11847lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
11841{ 11848{
11842 struct lpfc_hba *phba; 11849 struct lpfc_hba *phba;
11843 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 11850 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
@@ -11854,22 +11861,34 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11854 11861
11855 if (unlikely(!phba)) 11862 if (unlikely(!phba))
11856 return IRQ_NONE; 11863 return IRQ_NONE;
11857 if (unlikely(!phba->sli4_hba.fp_eq)) 11864 if (unlikely(!phba->sli4_hba.hba_eq))
11858 return IRQ_NONE; 11865 return IRQ_NONE;
11859 11866
11860 /* Get to the EQ struct associated with this vector */ 11867 /* Get to the EQ struct associated with this vector */
11861 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11868 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
11862 if (unlikely(!fpeq)) 11869 if (unlikely(!fpeq))
11863 return IRQ_NONE; 11870 return IRQ_NONE;
11864 11871
11872 if (lpfc_fcp_look_ahead) {
11873 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
11874 lpfc_sli4_eq_clr_intr(fpeq);
11875 else {
11876 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11877 return IRQ_NONE;
11878 }
11879 }
11880
11865 /* Check device state for handling interrupt */ 11881 /* Check device state for handling interrupt */
11866 if (unlikely(lpfc_intr_state_check(phba))) { 11882 if (unlikely(lpfc_intr_state_check(phba))) {
11883 fpeq->EQ_badstate++;
11867 /* Check again for link_state with lock held */ 11884 /* Check again for link_state with lock held */
11868 spin_lock_irqsave(&phba->hbalock, iflag); 11885 spin_lock_irqsave(&phba->hbalock, iflag);
11869 if (phba->link_state < LPFC_LINK_DOWN) 11886 if (phba->link_state < LPFC_LINK_DOWN)
11870 /* Flush, clear interrupt, and rearm the EQ */ 11887 /* Flush, clear interrupt, and rearm the EQ */
11871 lpfc_sli4_eq_flush(phba, fpeq); 11888 lpfc_sli4_eq_flush(phba, fpeq);
11872 spin_unlock_irqrestore(&phba->hbalock, iflag); 11889 spin_unlock_irqrestore(&phba->hbalock, iflag);
11890 if (lpfc_fcp_look_ahead)
11891 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11873 return IRQ_NONE; 11892 return IRQ_NONE;
11874 } 11893 }
11875 11894
@@ -11877,15 +11896,27 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11877 * Process all the events on FCP fast-path EQ 11896 * Process all the events on FCP fast-path EQ
11878 */ 11897 */
11879 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11898 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11880 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11899 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
11881 if (!(++ecount % fpeq->entry_repost)) 11900 if (!(++ecount % fpeq->entry_repost))
11882 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11901 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11902 fpeq->EQ_processed++;
11883 } 11903 }
11884 11904
11905 /* Track the max number of EQEs processed in 1 intr */
11906 if (ecount > fpeq->EQ_max_eqe)
11907 fpeq->EQ_max_eqe = ecount;
11908
11885 /* Always clear and re-arm the fast-path EQ */ 11909 /* Always clear and re-arm the fast-path EQ */
11886 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 11910 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11887 11911
11888 if (unlikely(ecount == 0)) { 11912 if (unlikely(ecount == 0)) {
11913 fpeq->EQ_no_entry++;
11914
11915 if (lpfc_fcp_look_ahead) {
11916 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11917 return IRQ_NONE;
11918 }
11919
11889 if (phba->intr_type == MSIX) 11920 if (phba->intr_type == MSIX)
11890 /* MSI-X treated interrupt served as no EQ share INT */ 11921 /* MSI-X treated interrupt served as no EQ share INT */
11891 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11922 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
@@ -11895,6 +11926,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11895 return IRQ_NONE; 11926 return IRQ_NONE;
11896 } 11927 }
11897 11928
11929 if (lpfc_fcp_look_ahead)
11930 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
11898 return IRQ_HANDLED; 11931 return IRQ_HANDLED;
11899} /* lpfc_sli4_fp_intr_handler */ 11932} /* lpfc_sli4_fp_intr_handler */
11900 11933
@@ -11919,8 +11952,8 @@ irqreturn_t
11919lpfc_sli4_intr_handler(int irq, void *dev_id) 11952lpfc_sli4_intr_handler(int irq, void *dev_id)
11920{ 11953{
11921 struct lpfc_hba *phba; 11954 struct lpfc_hba *phba;
11922 irqreturn_t sp_irq_rc, fp_irq_rc; 11955 irqreturn_t hba_irq_rc;
11923 bool fp_handled = false; 11956 bool hba_handled = false;
11924 uint32_t fcp_eqidx; 11957 uint32_t fcp_eqidx;
11925 11958
11926 /* Get the driver's phba structure from the dev_id */ 11959 /* Get the driver's phba structure from the dev_id */
@@ -11930,21 +11963,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
11930 return IRQ_NONE; 11963 return IRQ_NONE;
11931 11964
11932 /* 11965 /*
11933 * Invokes slow-path host attention interrupt handling as appropriate.
11934 */
11935 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
11936
11937 /*
11938 * Invoke fast-path host attention interrupt handling as appropriate. 11966 * Invoke fast-path host attention interrupt handling as appropriate.
11939 */ 11967 */
11940 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 11968 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
11941 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 11969 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
11942 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 11970 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
11943 if (fp_irq_rc == IRQ_HANDLED) 11971 if (hba_irq_rc == IRQ_HANDLED)
11944 fp_handled |= true; 11972 hba_handled |= true;
11945 } 11973 }
11946 11974
11947 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 11975 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
11948} /* lpfc_sli4_intr_handler */ 11976} /* lpfc_sli4_intr_handler */
11949 11977
11950/** 11978/**
@@ -12075,7 +12103,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12075 union lpfc_sli4_cfg_shdr *shdr; 12103 union lpfc_sli4_cfg_shdr *shdr;
12076 uint16_t dmult; 12104 uint16_t dmult;
12077 12105
12078 if (startq >= phba->cfg_fcp_eq_count) 12106 if (startq >= phba->cfg_fcp_io_channel)
12079 return 0; 12107 return 0;
12080 12108
12081 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12109 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -12089,12 +12117,13 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12089 eq_delay = &mbox->u.mqe.un.eq_delay; 12117 eq_delay = &mbox->u.mqe.un.eq_delay;
12090 12118
12091 /* Calculate delay multiplier from maximum interrupts per second */ 12119 /* Calculate delay multiplier from maximum interrupts per second */
12092 dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1; 12120 dmult = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12121 dmult = LPFC_DMULT_CONST/dmult - 1;
12093 12122
12094 cnt = 0; 12123 cnt = 0;
12095 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count; 12124 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12096 fcp_eqidx++) { 12125 fcp_eqidx++) {
12097 eq = phba->sli4_hba.fp_eq[fcp_eqidx]; 12126 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12098 if (!eq) 12127 if (!eq)
12099 continue; 12128 continue;
12100 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12129 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
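
The delay multiplier is now derived per channel: cfg_fcp_imax is the interrupt budget for the whole HBA, so each EQ is throttled to imax/channels interrupts per second before the LPFC_DMULT_CONST scaling is applied. Worked form of the two lines above, treating LPFC_DMULT_CONST as an opaque hardware constant:

        unsigned int per_eq = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
                                        /* e.g. 10000 / 4 = 2500 int/s per EQ */
        unsigned int dmult  = LPFC_DMULT_CONST / per_eq - 1;
                                        /* larger dmult = slower-firing EQ */
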
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 2626f58c074..2f48d000a3b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -131,7 +131,9 @@ typedef struct lpfcMboxq {
131 131
132#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per 132#define LPFC_MAX_RING_MASK 5 /* max num of rctl/type masks allowed per
133 ring */ 133 ring */
134#define LPFC_MAX_RING 4 /* max num of SLI rings used by driver */ 134#define LPFC_SLI3_MAX_RING 4 /* Max num of SLI3 rings used by driver.
135 For SLI4, an additional ring for each
136 FCP WQ will be allocated. */
135 137
136struct lpfc_sli_ring; 138struct lpfc_sli_ring;
137 139
@@ -158,6 +160,24 @@ struct lpfc_sli_ring_stat {
158 uint64_t iocb_rsp_full; /* IOCB rsp ring full */ 160 uint64_t iocb_rsp_full; /* IOCB rsp ring full */
159}; 161};
160 162
163struct lpfc_sli3_ring {
164 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
165 uint32_t next_cmdidx; /* next_cmd index */
166 uint32_t rspidx; /* current index in response ring */
167 uint32_t cmdidx; /* current index in command ring */
168 uint16_t numCiocb; /* number of command iocb's per ring */
169 uint16_t numRiocb; /* number of rsp iocb's per ring */
170 uint16_t sizeCiocb; /* Size of command iocb's in this ring */
171 uint16_t sizeRiocb; /* Size of response iocb's in this ring */
172 uint32_t *cmdringaddr; /* virtual address for cmd rings */
173 uint32_t *rspringaddr; /* virtual address for rsp rings */
174};
175
176struct lpfc_sli4_ring {
177 struct lpfc_queue *wqp; /* Pointer to associated WQ */
178};
179
180
161/* Structure used to hold SLI ring information */ 181/* Structure used to hold SLI ring information */
162struct lpfc_sli_ring { 182struct lpfc_sli_ring {
163 uint16_t flag; /* ring flags */ 183 uint16_t flag; /* ring flags */
@@ -166,16 +186,10 @@ struct lpfc_sli_ring {
166#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */ 186#define LPFC_STOP_IOCB_EVENT 0x020 /* Stop processing IOCB cmds event */
167 uint16_t abtsiotag; /* tracks next iotag to use for ABTS */ 187 uint16_t abtsiotag; /* tracks next iotag to use for ABTS */
168 188
169 uint32_t local_getidx; /* last available cmd index (from cmdGetInx) */
170 uint32_t next_cmdidx; /* next_cmd index */
171 uint32_t rspidx; /* current index in response ring */
172 uint32_t cmdidx; /* current index in command ring */
173 uint8_t rsvd; 189 uint8_t rsvd;
174 uint8_t ringno; /* ring number */ 190 uint8_t ringno; /* ring number */
175 uint16_t numCiocb; /* number of command iocb's per ring */ 191
176 uint16_t numRiocb; /* number of rsp iocb's per ring */ 192 spinlock_t ring_lock; /* lock for issuing commands */
177 uint16_t sizeCiocb; /* Size of command iocb's in this ring */
178 uint16_t sizeRiocb; /* Size of response iocb's in this ring */
179 193
180 uint32_t fast_iotag; /* max fastlookup based iotag */ 194 uint32_t fast_iotag; /* max fastlookup based iotag */
181 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 195 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@@ -186,8 +200,6 @@ struct lpfc_sli_ring {
186 struct list_head txcmplq; 200 struct list_head txcmplq;
187 uint16_t txcmplq_cnt; /* current length of queue */ 201 uint16_t txcmplq_cnt; /* current length of queue */
188 uint16_t txcmplq_max; /* max length */ 202 uint16_t txcmplq_max; /* max length */
189 uint32_t *cmdringaddr; /* virtual address for cmd rings */
190 uint32_t *rspringaddr; /* virtual address for rsp rings */
191 uint32_t missbufcnt; /* keep track of buffers to post */ 203 uint32_t missbufcnt; /* keep track of buffers to post */
192 struct list_head postbufq; 204 struct list_head postbufq;
193 uint16_t postbufq_cnt; /* current length of queue */ 205 uint16_t postbufq_cnt; /* current length of queue */
@@ -207,6 +219,10 @@ struct lpfc_sli_ring {
207 /* cmd ring available */ 219 /* cmd ring available */
208 void (*lpfc_sli_cmd_available) (struct lpfc_hba *, 220 void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
209 struct lpfc_sli_ring *); 221 struct lpfc_sli_ring *);
222 union {
223 struct lpfc_sli3_ring sli3;
224 struct lpfc_sli4_ring sli4;
225 } sli;
210}; 226};
211 227
212/* Structure used for configuring rings to a specific profile or rctl / type */ 228/* Structure used for configuring rings to a specific profile or rctl / type */
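
Splitting the ring bookkeeping into a sli3/sli4 union makes the two register models mutually exclusive by construction: SLI3 code indexes host-side command/response rings, while SLI4 code only needs the backing work queue pointer. The access pattern the layout implies:

        if (phba->sli_rev == LPFC_SLI_REV4)
                wq  = pring->sli.sli4.wqp;      /* ring fronts an SLI4 WQ */
        else
                idx = pring->sli.sli3.rspidx;   /* classic host-owned index */
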
@@ -239,6 +255,8 @@ struct lpfc_sli_stat {
239 uint64_t mbox_stat_err; /* Mbox cmds completed status error */ 255 uint64_t mbox_stat_err; /* Mbox cmds completed status error */
240 uint64_t mbox_cmd; /* Mailbox commands issued */ 256 uint64_t mbox_cmd; /* Mailbox commands issued */
241 uint64_t sli_intr; /* Count of Host Attention interrupts */ 257 uint64_t sli_intr; /* Count of Host Attention interrupts */
258 uint64_t sli_prev_intr; /* Previous cnt of Host Attention interrupts */
259 uint64_t sli_ips; /* Host Attention interrupts per sec */
242 uint32_t err_attn_event; /* Error Attn event counters */ 260 uint32_t err_attn_event; /* Error Attn event counters */
243 uint32_t link_event; /* Link event counters */ 261 uint32_t link_event; /* Link event counters */
244 uint32_t mbox_event; /* Mailbox event counters */ 262 uint32_t mbox_event; /* Mailbox event counters */
@@ -270,7 +288,7 @@ struct lpfc_sli {
270#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ 288#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
271#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ 289#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
272 290
273 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 291 struct lpfc_sli_ring *ring;
274 int fcp_ring; /* ring used for FCP initiator commands */ 292 int fcp_ring; /* ring used for FCP initiator commands */
275 int next_ring; 293 int next_ring;
276 294
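
Since LPFC_SLI3_MAX_RING no longer bounds the ring count, ring[] becomes a pointer and must be sized at attach time. The allocation site is not in this hunk, but the shape it implies is roughly:

        psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
        if (phba->sli_rev == LPFC_SLI_REV4)
                psli->num_rings += phba->cfg_fcp_io_channel;
        psli->ring = kzalloc(psli->num_rings * sizeof(struct lpfc_sli_ring),
                             GFP_KERNEL);
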
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index ec756118c5c..bd4bc4342ae 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -34,18 +34,10 @@
34/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ 34/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
35#define LPFC_NEMBED_MBOX_SGL_CNT 254 35#define LPFC_NEMBED_MBOX_SGL_CNT 254
36 36
37/* Multi-queue arrangement for fast-path FCP work queues */ 37/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
38#define LPFC_FN_EQN_MAX 8 38#define LPFC_FCP_IO_CHAN_DEF 4
39#define LPFC_SP_EQN_DEF 1 39#define LPFC_FCP_IO_CHAN_MIN 1
40#define LPFC_FP_EQN_DEF 4 40#define LPFC_FCP_IO_CHAN_MAX 8
41#define LPFC_FP_EQN_MIN 1
42#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
43
44#define LPFC_FN_WQN_MAX 32
45#define LPFC_SP_WQN_DEF 1
46#define LPFC_FP_WQN_DEF 4
47#define LPFC_FP_WQN_MIN 1
48#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
49 41
50/* 42/*
51 * Provide the default FCF Record attributes used by the driver 43 * Provide the default FCF Record attributes used by the driver
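
The three LPFC_FCP_IO_CHAN_* constants replace the old separate EQ/WQ min/def/max pairs with a single bound on complete EQ/CQ/WQ tuples. A plausible validation of the corresponding module parameter against them (a sketch, not the driver's verbatim check):

        if (phba->cfg_fcp_io_channel < LPFC_FCP_IO_CHAN_MIN ||
            phba->cfg_fcp_io_channel > LPFC_FCP_IO_CHAN_MAX)
                phba->cfg_fcp_io_channel = LPFC_FCP_IO_CHAN_DEF; /* fall back to 4 */
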
@@ -141,6 +133,37 @@ struct lpfc_queue {
141 uint32_t page_count; /* Number of pages allocated for this queue */ 133 uint32_t page_count; /* Number of pages allocated for this queue */
142 uint32_t host_index; /* The host's index for putting or getting */ 134 uint32_t host_index; /* The host's index for putting or getting */
143 uint32_t hba_index; /* The last known hba index for get or put */ 135 uint32_t hba_index; /* The last known hba index for get or put */
136
137 struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
138
139 /* For q stats */
140 uint32_t q_cnt_1;
141 uint32_t q_cnt_2;
142 uint32_t q_cnt_3;
143 uint64_t q_cnt_4;
144/* defines for EQ stats */
145#define EQ_max_eqe q_cnt_1
146#define EQ_no_entry q_cnt_2
147#define EQ_badstate q_cnt_3
148#define EQ_processed q_cnt_4
149
150/* defines for CQ stats */
151#define CQ_mbox q_cnt_1
152#define CQ_max_cqe q_cnt_1
153#define CQ_release_wqe q_cnt_2
154#define CQ_xri_aborted q_cnt_3
155#define CQ_wq q_cnt_4
156
157/* defines for WQ stats */
158#define WQ_overflow q_cnt_1
159#define WQ_posted q_cnt_4
160
161/* defines for RQ stats */
162#define RQ_no_posted_buf q_cnt_1
163#define RQ_no_buf_found q_cnt_2
164#define RQ_buf_trunc q_cnt_3
165#define RQ_rcv_buf q_cnt_4
166
144 union sli4_qe qe[1]; /* array to index entries (must be last) */ 167 union sli4_qe qe[1]; /* array to index entries (must be last) */
145}; 168};
146 169
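
The q_cnt_1..q_cnt_4 fields are a four-counter overlay reused by every queue type; the #defines just give each role a readable name. Note that CQ_mbox and CQ_max_cqe deliberately share q_cnt_1, since a given CQ serves either the mailbox or a work queue, never both. In use the aliases expand to plain field increments:

        eq->EQ_processed++;             /* expands to eq->q_cnt_4++ */
        hrq->RQ_rcv_buf++;              /* expands to hrq->q_cnt_4++ */
        if (ecount > cq->CQ_max_cqe)    /* q_cnt_1 as a high-water mark */
                cq->CQ_max_cqe = ecount;
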
@@ -350,6 +373,7 @@ struct lpfc_hba;
350struct lpfc_fcp_eq_hdl { 373struct lpfc_fcp_eq_hdl {
351 uint32_t idx; 374 uint32_t idx;
352 struct lpfc_hba *phba; 375 struct lpfc_hba *phba;
376 atomic_t fcp_eq_in_use;
353}; 377};
354 378
355/* Port Capabilities for SLI4 Parameters */ 379/* Port Capabilities for SLI4 Parameters */
@@ -407,6 +431,8 @@ struct lpfc_sli4_lnk_info {
407 uint8_t lnk_no; 431 uint8_t lnk_no;
408}; 432};
409 433
434#define LPFC_SLI4_HANDLER_NAME_SZ 16
435
410/* SLI4 HBA data structure entries */ 436/* SLI4 HBA data structure entries */
411struct lpfc_sli4_hba { 437struct lpfc_sli4_hba {
412 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 438 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -463,20 +489,23 @@ struct lpfc_sli4_hba {
463 struct lpfc_register sli_intf; 489 struct lpfc_register sli_intf;
464 struct lpfc_pc_sli4_params pc_sli4_params; 490 struct lpfc_pc_sli4_params pc_sli4_params;
465 struct msix_entry *msix_entries; 491 struct msix_entry *msix_entries;
492 uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ];
466 uint32_t cfg_eqn; 493 uint32_t cfg_eqn;
467 uint32_t msix_vec_nr; 494 uint32_t msix_vec_nr;
468 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ 495 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
496
469 /* Pointers to the constructed SLI4 queues */ 497 /* Pointers to the constructed SLI4 queues */
470 struct lpfc_queue **fp_eq; /* Fast-path event queue */ 498 struct lpfc_queue **hba_eq;/* Event queues for HBA */
471 struct lpfc_queue *sp_eq; /* Slow-path event queue */ 499 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
472 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ 500 struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
501 uint16_t *fcp_cq_map;
502
503 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
504 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
473 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ 505 struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
474 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ 506 struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
475 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ 507 struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
476 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ 508 struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
477 struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
478 struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
479 struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
480 509
481 /* Setup information for various queue parameters */ 510 /* Setup information for various queue parameters */
482 int eq_esize; 511 int eq_esize;
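[editor's note] The queue statistics defines added above are aliases: each queue type reuses the same four generic q_cnt_1..q_cnt_4 counters of struct lpfc_queue under type-specific names, so no new storage is added. A minimal compilable sketch of the trick, with the rest of struct lpfc_queue abridged away:

	#include <stdint.h>

	/* Abridged stand-in for struct lpfc_queue; only the counters matter. */
	struct lpfc_queue_sketch {
		uint32_t q_cnt_1;  /* EQ_max_eqe, CQ_mbox/CQ_max_cqe, WQ_overflow, RQ_no_posted_buf */
		uint32_t q_cnt_2;  /* EQ_no_entry, CQ_release_wqe, RQ_no_buf_found */
		uint32_t q_cnt_3;  /* EQ_badstate, CQ_xri_aborted, RQ_buf_trunc */
		uint32_t q_cnt_4;  /* EQ_processed, CQ_wq, WQ_posted, RQ_rcv_buf */
	};

	#define EQ_processed q_cnt_4	/* same mechanism as the defines above */

	static void eq_note_processed(struct lpfc_queue_sketch *eq)
	{
		eq->EQ_processed++;	/* the preprocessor turns this into eq->q_cnt_4++ */
	}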
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4704e5b5088..04265a1c4e5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,11 +18,16 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.32" 21#define LPFC_DRIVER_VERSION "8.3.34"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23
24/* Used for SLI 2/3 */
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 25#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 26#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
25 27
28/* Used for SLI4 */
29#define LPFC_DRIVER_HANDLER_NAME "lpfc:"
30
26#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ 31#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
27 LPFC_DRIVER_VERSION 32 LPFC_DRIVER_VERSION
28#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved." 33#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved."
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index e8f89264768..fcb005fa4bd 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.06.15-rc1" 36#define MEGASAS_VERSION "00.00.06.18-rc1"
37#define MEGASAS_RELDATE "Mar. 19, 2012" 37#define MEGASAS_RELDATE "Jun. 17, 2012"
38#define MEGASAS_EXT_VERSION "Mon. Mar. 19 17:00:00 PDT 2012" 38#define MEGASAS_EXT_VERSION "Tue. Jun. 17 17:00:00 PDT 2012"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -747,6 +747,7 @@ struct megasas_ctrl_info {
747#define MEGASAS_RESET_NOTICE_INTERVAL 5 747#define MEGASAS_RESET_NOTICE_INTERVAL 5
748#define MEGASAS_IOCTL_CMD 0 748#define MEGASAS_IOCTL_CMD 0
749#define MEGASAS_DEFAULT_CMD_TIMEOUT 90 749#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
750#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
750 751
751/* 752/*
752 * FW reports the maximum of number of commands that it can accept (maximum 753 * FW reports the maximum of number of commands that it can accept (maximum
@@ -1364,6 +1365,7 @@ struct megasas_instance {
1364 unsigned long bar; 1365 unsigned long bar;
1365 long reset_flags; 1366 long reset_flags;
1366 struct mutex reset_mutex; 1367 struct mutex reset_mutex;
1368 int throttlequeuedepth;
1367}; 1369};
1368 1370
1369enum { 1371enum {
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index ed38454228c..0393ec478cd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.06.15-rc1 21 * Version : v00.00.06.18-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -71,6 +71,16 @@ static int msix_disable;
71module_param(msix_disable, int, S_IRUGO); 71module_param(msix_disable, int, S_IRUGO);
72MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); 72MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
73 73
74static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
75module_param(throttlequeuedepth, int, S_IRUGO);
76MODULE_PARM_DESC(throttlequeuedepth,
77 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
78
79int resetwaittime = MEGASAS_RESET_WAIT_TIME;
80module_param(resetwaittime, int, S_IRUGO);
81MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
82 "before resetting adapter. Default: 180");
83
74MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
75MODULE_VERSION(MEGASAS_VERSION); 85MODULE_VERSION(MEGASAS_VERSION);
76MODULE_AUTHOR("megaraidlinux@lsi.com"); 86MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -1595,8 +1605,9 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
1595{ 1605{
1596 unsigned long flags; 1606 unsigned long flags;
1597 if (instance->flag & MEGASAS_FW_BUSY 1607 if (instance->flag & MEGASAS_FW_BUSY
1598 && time_after(jiffies, instance->last_time + 5 * HZ) 1608 && time_after(jiffies, instance->last_time + 5 * HZ)
1599 && atomic_read(&instance->fw_outstanding) < 17) { 1609 && atomic_read(&instance->fw_outstanding) <
1610 instance->throttlequeuedepth + 1) {
1600 1611
1601 spin_lock_irqsave(instance->host->host_lock, flags); 1612 spin_lock_irqsave(instance->host->host_lock, flags);
1602 instance->flag &= ~MEGASAS_FW_BUSY; 1613 instance->flag &= ~MEGASAS_FW_BUSY;
@@ -1772,7 +1783,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
1772 return SUCCESS; 1783 return SUCCESS;
1773 } 1784 }
1774 1785
1775 for (i = 0; i < wait_time; i++) { 1786 for (i = 0; i < resetwaittime; i++) {
1776 1787
1777 int outstanding = atomic_read(&instance->fw_outstanding); 1788 int outstanding = atomic_read(&instance->fw_outstanding);
1778 1789
@@ -1914,7 +1925,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1914 /* FW is busy, throttle IO */ 1925 /* FW is busy, throttle IO */
1915 spin_lock_irqsave(instance->host->host_lock, flags); 1926 spin_lock_irqsave(instance->host->host_lock, flags);
1916 1927
1917 instance->host->can_queue = 16; 1928 instance->host->can_queue = instance->throttlequeuedepth;
1918 instance->last_time = jiffies; 1929 instance->last_time = jiffies;
1919 instance->flag |= MEGASAS_FW_BUSY; 1930 instance->flag |= MEGASAS_FW_BUSY;
1920 1931
@@ -3577,6 +3588,24 @@ static int megasas_init_fw(struct megasas_instance *instance)
3577 3588
3578 kfree(ctrl_info); 3589 kfree(ctrl_info);
3579 3590
3591 /* Check for valid throttlequeuedepth module parameter */
3592 if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY ||
3593 instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) {
3594 if (throttlequeuedepth > (instance->max_fw_cmds -
3595 MEGASAS_SKINNY_INT_CMDS))
3596 instance->throttlequeuedepth =
3597 MEGASAS_THROTTLE_QUEUE_DEPTH;
3598 else
3599 instance->throttlequeuedepth = throttlequeuedepth;
3600 } else {
3601 if (throttlequeuedepth > (instance->max_fw_cmds -
3602 MEGASAS_INT_CMDS))
3603 instance->throttlequeuedepth =
3604 MEGASAS_THROTTLE_QUEUE_DEPTH;
3605 else
3606 instance->throttlequeuedepth = throttlequeuedepth;
3607 }
3608
3580 /* 3609 /*
3581 * Setup tasklet for cmd completion 3610 * Setup tasklet for cmd completion
3582 */ 3611 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a610cf1d484..ddf094e7d0a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -94,6 +94,7 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
94void megaraid_sas_kill_hba(struct megasas_instance *instance); 94void megaraid_sas_kill_hba(struct megasas_instance *instance);
95 95
96extern u32 megasas_dbg_lvl; 96extern u32 megasas_dbg_lvl;
97extern int resetwaittime;
97 98
98/** 99/**
99 * megasas_enable_intr_fusion - Enables interrupts 100 * megasas_enable_intr_fusion - Enables interrupts
@@ -461,8 +462,8 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
461 * Allocate the dynamic array first and then allocate individual 462 * Allocate the dynamic array first and then allocate individual
462 * commands. 463 * commands.
463 */ 464 */
464 fusion->cmd_list = kmalloc(sizeof(struct megasas_cmd_fusion *) 465 fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
465 *max_cmd, GFP_KERNEL); 466 * max_cmd, GFP_KERNEL);
466 467
467 if (!fusion->cmd_list) { 468 if (!fusion->cmd_list) {
468 printk(KERN_DEBUG "megasas: out of memory. Could not alloc " 469 printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
@@ -470,9 +471,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
470 goto fail_cmd_list; 471 goto fail_cmd_list;
471 } 472 }
472 473
473 memset(fusion->cmd_list, 0, sizeof(struct megasas_cmd_fusion *)
474 *max_cmd);
475
476 max_cmd = instance->max_fw_cmds; 474 max_cmd = instance->max_fw_cmds;
477 for (i = 0; i < max_cmd; i++) { 475 for (i = 0; i < max_cmd; i++) {
478 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), 476 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
@@ -2063,9 +2061,9 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
2063int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance) 2061int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)
2064{ 2062{
2065 int i, outstanding, retval = 0; 2063 int i, outstanding, retval = 0;
2066 u32 fw_state, wait_time = MEGASAS_RESET_WAIT_TIME; 2064 u32 fw_state;
2067 2065
2068 for (i = 0; i < wait_time; i++) { 2066 for (i = 0; i < resetwaittime; i++) {
2069 /* Check if firmware is in fault state */ 2067 /* Check if firmware is in fault state */
2070 fw_state = instance->instancet->read_fw_status_reg( 2068 fw_state = instance->instancet->read_fw_status_reg(
2071 instance->reg_set) & MFI_STATE_MASK; 2069 instance->reg_set) & MFI_STATE_MASK;
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index bbb7e4bf30a..39f08dd2055 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
2# Kernel configuration file for the MPT2SAS 2# Kernel configuration file for the MPT2SAS
3# 3#
4# This code is based on drivers/scsi/mpt2sas/Kconfig 4# This code is based on drivers/scsi/mpt2sas/Kconfig
5# Copyright (C) 2007-2010 LSI Corporation 5# Copyright (C) 2007-2012 LSI Corporation
6# (mailto:DL-MPTFusionLinux@lsi.com) 6# (mailto:DL-MPTFusionLinux@lsi.com)
7 7
8# This program is free software; you can redistribute it and/or 8# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a80f3220c64..e960f9625c7 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2011 LSI Corporation. 2 * Copyright (c) 2000-2012 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2.h 5 * Name: mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.23 11 * mpi2.h Version: 02.00.25
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -72,6 +72,9 @@
72 * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. 72 * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
73 * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. 73 * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
74 * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. 74 * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
75 * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
76 * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
77 * Added Hard Reset delay timings.
75 * -------------------------------------------------------------------------- 78 * --------------------------------------------------------------------------
76 */ 79 */
77 80
@@ -97,7 +100,7 @@
97#define MPI2_VERSION_02_00 (0x0200) 100#define MPI2_VERSION_02_00 (0x0200)
98 101
99/* versioning for this MPI header set */ 102/* versioning for this MPI header set */
100#define MPI2_HEADER_VERSION_UNIT (0x17) 103#define MPI2_HEADER_VERSION_UNIT (0x19)
101#define MPI2_HEADER_VERSION_DEV (0x00) 104#define MPI2_HEADER_VERSION_DEV (0x00)
102#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 105#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
103#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 106#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -275,6 +278,11 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
275#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) 278#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
276 279
277 280
281/* Hard Reset delay timings */
282#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
283#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
284#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
285
278/***************************************************************************** 286/*****************************************************************************
279* 287*
280* Message Descriptors 288* Message Descriptors
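[editor's note] The three new constants are microsecond delays bounding when PCIe reads are safe after a hard reset. A host-side sketch of honoring the first one — the helper name is invented here, and msleep() granularity is used since the values are tens of milliseconds:

	#include <linux/delay.h>

	/* Hypothetical helper: no PCIe register read should be issued until
	 * the first-read delay has elapsed after asserting hard reset. */
	static void post_hard_reset_first_read_delay(void)
	{
		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC / 1000);
	}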
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index de90162413c..38c5da39814 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2010 LSI Corporation. 2 * Copyright (c) 2000-2012 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_init.h 5 * Name: mpi2_init.h
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.11 9 * mpi2_init.h Version: 02.00.13
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -34,6 +34,8 @@
34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. 34 * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
35 * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. 35 * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
36 * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. 36 * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
37 * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
38 * Priority to match SAM-4.
37 * -------------------------------------------------------------------------- 39 * --------------------------------------------------------------------------
38 */ 40 */
39 41
@@ -194,6 +196,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
194 196
195#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) 197#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
196#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) 198#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
199/* alternate name for the previous field; called Command Priority in SAM-4 */
200#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
201#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
197 202
198#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) 203#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
199#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) 204#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
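[editor's note] The CMDPRI defines add no new bits; they alias the existing TASKPRI mask and shift so code can use the SAM-4 name for the same 4-bit field (bits 14:11 of the Control word). A small sketch, assuming a little-endian Control field as elsewhere in the MPI headers:

	/* Hypothetical helper: extract the SAM-4 Command Priority from the
	 * Control word of an Mpi2SCSIIORequest_t. */
	static u8 scsiio_cmd_priority(__le32 control)
	{
		return (le32_to_cpu(control) & MPI2_SCSIIO_CONTROL_CMDPRI_MASK) >>
		       MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
	}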
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 9a925c07a9e..b0d4760bb17 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2011 LSI Corporation. 2 * Copyright (c) 2000-2012 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_ioc.h 5 * Name: mpi2_ioc.h
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.19 9 * mpi2_ioc.h Version: 02.00.21
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -117,6 +117,7 @@
117 * 08-24-11 02.00.19 Added PhysicalPort field to 117 * 08-24-11 02.00.19 Added PhysicalPort field to
118 * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. 118 * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
119 * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. 119 * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
120 * 03-29-12 02.00.21 Added a product specific range to event values.
120 * -------------------------------------------------------------------------- 121 * --------------------------------------------------------------------------
121 */ 122 */
122 123
@@ -492,7 +493,8 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY
492#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) 493#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
493#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) 494#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
494#define MPI2_EVENT_HOST_MESSAGE (0x0028) 495#define MPI2_EVENT_HOST_MESSAGE (0x0028)
495 496#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
497#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
496 498
497/* Log Entry Added Event data */ 499/* Log Entry Added Event data */
498 500
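[editor's note] The new MIN/MAX pair reserves event codes 0x006E-0x007F for product-specific use, so generic event handling can recognize vendor events instead of logging them as unknown. One way a consumer might test for the range (the helper is illustrative, not an mpt2sas function):

	static bool mpi2_event_is_product_specific(u16 event)
	{
		return event >= MPI2_EVENT_MIN_PRODUCT_SPECIFIC &&
		       event <= MPI2_EVENT_MAX_PRODUCT_SPECIFIC;
	}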
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 0601612b875..2b38af213be 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2010 LSI Corporation. 2 * Copyright (c) 2000-2012 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_raid.h 5 * Name: mpi2_raid.h
6 * Title: MPI Integrated RAID messages and structures 6 * Title: MPI Integrated RAID messages and structures
7 * Creation Date: April 26, 2007 7 * Creation Date: April 26, 2007
8 * 8 *
9 * mpi2_raid.h Version: 02.00.06 9 * mpi2_raid.h Version: 02.00.08
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -26,7 +26,7 @@
26 * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with 26 * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
27 * related structures and defines. 27 * related structures and defines.
28 * Added product-specific range to RAID Action values. 28 * Added product-specific range to RAID Action values.
29 29 * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
30 * -------------------------------------------------------------------------- 30 * --------------------------------------------------------------------------
31 */ 31 */
32 32
@@ -181,6 +181,7 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
181#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) 181#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
182#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) 182#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
183#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) 183#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
184#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
184#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) 185#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
185#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) 186#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
186 187
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9d5a56c4b33..ffd85c511c8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2010 LSI Corporation 6 * Copyright (C) 2007-2012 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -1978,9 +1978,9 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
1978 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1978 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1979 MPT2SAS_INTEL_RMS2LL040_BRANDING); 1979 MPT2SAS_INTEL_RMS2LL040_BRANDING);
1980 break; 1980 break;
1981 case MPT2SAS_INTEL_RAMSDALE_SSDID: 1981 case MPT2SAS_INTEL_SSD910_SSDID:
1982 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 1982 printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
1983 MPT2SAS_INTEL_RAMSDALE_BRANDING); 1983 MPT2SAS_INTEL_SSD910_BRANDING);
1984 break; 1984 break;
1985 default: 1985 default:
1986 break; 1986 break;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index b3a1a30055d..543d8d63747 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
6 * Copyright (C) 2007-2010 LSI Corporation 6 * Copyright (C) 2007-2012 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "13.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "14.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 13 73#define MPT2SAS_MAJOR_VERSION 14
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
@@ -171,8 +171,8 @@
171 "Intel Integrated RAID Module RMS2LL040" 171 "Intel Integrated RAID Module RMS2LL040"
172#define MPT2SAS_INTEL_RS25GB008_BRANDING \ 172#define MPT2SAS_INTEL_RS25GB008_BRANDING \
173 "Intel(R) RAID Controller RS25GB008" 173 "Intel(R) RAID Controller RS25GB008"
174#define MPT2SAS_INTEL_RAMSDALE_BRANDING \ 174#define MPT2SAS_INTEL_SSD910_BRANDING \
175 "Intel 720 Series SSD" 175 "Intel(R) SSD 910 Series"
176/* 176/*
177 * Intel HBA SSDIDs 177 * Intel HBA SSDIDs
178 */ 178 */
@@ -183,7 +183,7 @@
183#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E 183#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
184#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F 184#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
185#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 185#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
186#define MPT2SAS_INTEL_RAMSDALE_SSDID 0x3700 186#define MPT2SAS_INTEL_SSD910_SSDID 0x3700
187 187
188/* 188/*
189 * HP HBA branding 189 * HP HBA branding
@@ -1096,6 +1096,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1096 *mpi_reply, Mpi2IOUnitPage1_t *config_page); 1096 *mpi_reply, Mpi2IOUnitPage1_t *config_page);
1097int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 1097int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1098 *mpi_reply, Mpi2IOUnitPage1_t *config_page); 1098 *mpi_reply, Mpi2IOUnitPage1_t *config_page);
1099int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
1100 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz);
1099int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t 1101int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1100 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz); 1102 *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
1101int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, 1103int mpt2sas_config_set_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 2b4d37613d3..863778071a9 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
2 * This module provides common API for accessing firmware configuration pages 2 * This module provides common API for accessing firmware configuration pages
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
5 * Copyright (C) 2007-2010 LSI Corporation 5 * Copyright (C) 2007-2012 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -683,6 +683,42 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
683} 683}
684 684
685/** 685/**
686 * mpt2sas_config_get_iounit_pg3 - obtain iounit page 3
687 * @ioc: per adapter object
688 * @mpi_reply: reply mf payload returned from firmware
689 * @config_page: contents of the config page
690 * @sz: size of buffer passed in config_page
691 * Context: sleep.
692 *
693 * Returns 0 for success, non-zero for failure.
694 */
695int
696mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc,
697 Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz)
698{
699 Mpi2ConfigRequest_t mpi_request;
700 int r;
701
702 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
703 mpi_request.Function = MPI2_FUNCTION_CONFIG;
704 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
705 mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
706 mpi_request.Header.PageNumber = 3;
707 mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION;
708 mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
709 r = _config_request(ioc, &mpi_request, mpi_reply,
710 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
711 if (r)
712 goto out;
713
714 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
715 r = _config_request(ioc, &mpi_request, mpi_reply,
716 MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
717 out:
718 return r;
719}
720
721/**
686 * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 722 * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8
687 * @ioc: per adapter object 723 * @ioc: per adapter object
688 * @mpi_reply: reply mf payload returned from firmware 724 * @mpi_reply: reply mf payload returned from firmware
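[editor's note] Because IO unit page 3 ends in a variable-length GPIOVal array, mpt2sas_config_get_iounit_pg3() leaves sizing to the caller and copies at most sz bytes back. The calling pattern, condensed from the BRM sysfs handler added below:

	Mpi2ConfigReply_t mpi_reply;
	Mpi2IOUnitPage3_t *pg3;
	int sz;

	sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + sizeof(u16) * 36;	/* 36 GPIOs */
	pg3 = kzalloc(sz, GFP_KERNEL);
	if (pg3 && !mpt2sas_config_get_iounit_pg3(ioc, &mpi_reply, pg3, sz))
		pr_info("GPIOCount=%d\n", pg3->GPIOCount);
	kfree(pg3);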
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 49bdd2dc845..08685c4cf23 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
6 * Copyright (C) 2007-2010 LSI Corporation 6 * Copyright (C) 2007-2012 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -2181,10 +2181,12 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2181 return -EAGAIN; 2181 return -EAGAIN;
2182 2182
2183 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; 2183 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2184 if (state == NON_BLOCKING && !mutex_trylock(&ioc->ctl_cmds.mutex)) 2184 if (state == NON_BLOCKING) {
2185 return -EAGAIN; 2185 if (!mutex_trylock(&ioc->ctl_cmds.mutex))
2186 else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) 2186 return -EAGAIN;
2187 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2187 return -ERESTARTSYS; 2188 return -ERESTARTSYS;
2189 }
2188 2190
2189 switch (cmd) { 2191 switch (cmd) {
2190 case MPT2IOCINFO: 2192 case MPT2IOCINFO:
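[editor's note] The brace restructuring above is a behavioral fix, not style: in the old chain, a NON_BLOCKING caller whose mutex_trylock() succeeded made the first condition false, so control fell through to the else-if and tried to take the already-held mutex a second time. Reduced to its shape:

	/* old, buggy shape: the else-if also runs when trylock has succeeded */
	if (nonblocking && !mutex_trylock(&m))
		return -EAGAIN;
	else if (mutex_lock_interruptible(&m))	/* second acquisition -> deadlock */
		return -ERESTARTSYS;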
@@ -2690,6 +2692,75 @@ _ctl_ioc_reply_queue_count_show(struct device *cdev,
2690static DEVICE_ATTR(reply_queue_count, S_IRUGO, 2692static DEVICE_ATTR(reply_queue_count, S_IRUGO,
2691 _ctl_ioc_reply_queue_count_show, NULL); 2693 _ctl_ioc_reply_queue_count_show, NULL);
2692 2694
2695/**
2696 * _ctl_BRM_status_show - Backup Rail Monitor Status
2697 * @cdev: pointer to embedded class device
2698 * @buf: the buffer returned
2699 *
2700 * Reports the Backup Rail Monitor (BRM) status of a WarpDrive adapter.
2701 *
2702 * A sysfs 'read-only' shost attribute.
2703 */
2704static ssize_t
2705_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2706 char *buf)
2707{
2708 struct Scsi_Host *shost = class_to_shost(cdev);
2709 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2710 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
2711 Mpi2ConfigReply_t mpi_reply;
2712 u16 backup_rail_monitor_status = 0;
2713 u16 ioc_status;
2714 int sz;
2715 ssize_t rc = 0;
2716
2717 if (!ioc->is_warpdrive) {
2718		printk(MPT2SAS_ERR_FMT "%s: BRM attribute is only for "
2719		    "warpdrive\n", ioc->name, __func__);
2720 goto out;
2721 }
2722
2723	/* allocate up to 36 GPIOVal entries */
2724 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
2725 io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
2726 if (!io_unit_pg3) {
2727		printk(MPT2SAS_ERR_FMT "%s: failed allocating memory "
2728		    "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
2729 goto out;
2730 }
2731
2732 if (mpt2sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
2733 0) {
2734 printk(MPT2SAS_ERR_FMT
2735 "%s: failed reading iounit_pg3\n", ioc->name,
2736 __func__);
2737 goto out;
2738 }
2739
2740 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
2741 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2742		printk(MPT2SAS_ERR_FMT "%s: iounit_pg3 failed with "
2743		    "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
2744 goto out;
2745 }
2746
2747 if (io_unit_pg3->GPIOCount < 25) {
2748		printk(MPT2SAS_ERR_FMT "%s: iounit_pg3->GPIOCount less than "
2749		    "25 entries, detected (%d) entries\n", ioc->name, __func__,
2750 io_unit_pg3->GPIOCount);
2751 goto out;
2752 }
2753
2754 /* BRM status is in bit zero of GPIOVal[24] */
2755 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
2756 rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
2757
2758 out:
2759 kfree(io_unit_pg3);
2760 return rc;
2761}
2762static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
2763
2693struct DIAG_BUFFER_START { 2764struct DIAG_BUFFER_START {
2694 __le32 Size; 2765 __le32 Size;
2695 __le32 DiagVersion; 2766 __le32 DiagVersion;
@@ -2901,6 +2972,7 @@ struct device_attribute *mpt2sas_host_attrs[] = {
2901 &dev_attr_host_trace_buffer, 2972 &dev_attr_host_trace_buffer,
2902 &dev_attr_host_trace_buffer_enable, 2973 &dev_attr_host_trace_buffer_enable,
2903 &dev_attr_reply_queue_count, 2974 &dev_attr_reply_queue_count,
2975 &dev_attr_BRM_status,
2904 NULL, 2976 NULL,
2905}; 2977};
2906 2978
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 11ff1d5fb8f..b5eb0d1b8ea 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
6 * Copyright (C) 2007-2010 LSI Corporation 6 * Copyright (C) 2007-2012 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 9731f8e661b..69cc7d0c112 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
2 * Logging Support for MPT (Message Passing Technology) based controllers 2 * Logging Support for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
5 * Copyright (C) 2007-2010 LSI Corporation 5 * Copyright (C) 2007-2012 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 1ccae45c527..af4e6c451b1 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
5 * Copyright (C) 2007-2010 LSI Corporation 5 * Copyright (C) 2007-2012 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -119,6 +119,15 @@ module_param(diag_buffer_enable, int, 0);
119MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers " 119MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
120 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); 120 "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
121 121
122static int disable_discovery = -1;
123module_param(disable_discovery, int, 0);
124MODULE_PARM_DESC(disable_discovery, " disable discovery ");
125
126/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
127static int prot_mask = 0;
128module_param(prot_mask, int, 0);
129MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
130
122/** 131/**
123 * struct sense_info - common structure for obtaining sense keys 132 * struct sense_info - common structure for obtaining sense keys
124 * @skey: sense key 133 * @skey: sense key
@@ -3768,8 +3777,6 @@ static void
3768_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) 3777_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
3769{ 3778{
3770 u8 ascq; 3779 u8 ascq;
3771 u8 sk;
3772 u8 host_byte;
3773 3780
3774 switch (ioc_status) { 3781 switch (ioc_status) {
3775 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: 3782 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
@@ -3786,16 +3793,8 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
3786 break; 3793 break;
3787 } 3794 }
3788 3795
3789 if (scmd->sc_data_direction == DMA_TO_DEVICE) { 3796 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, ascq);
3790 sk = ILLEGAL_REQUEST; 3797 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
3791 host_byte = DID_ABORT;
3792 } else {
3793 sk = ABORTED_COMMAND;
3794 host_byte = DID_OK;
3795 }
3796
3797 scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
3798 scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
3799 SAM_STAT_CHECK_CONDITION; 3798 SAM_STAT_CHECK_CONDITION;
3800} 3799}
3801 3800
@@ -5973,8 +5972,14 @@ _scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
5973#endif 5972#endif
5974 5973
5975 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && 5974 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
5976 !ioc->sas_hba.num_phys) 5975 !ioc->sas_hba.num_phys) {
5976 if (disable_discovery > 0 && ioc->shost_recovery) {
5977 /* Wait for the reset to complete */
5978 while (ioc->shost_recovery)
5979 ssleep(1);
5980 }
5977 _scsih_sas_host_add(ioc); 5981 _scsih_sas_host_add(ioc);
5982 }
5978} 5983}
5979 5984
5980/** 5985/**
@@ -7254,7 +7259,8 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
7254 _scsih_search_responding_sas_devices(ioc); 7259 _scsih_search_responding_sas_devices(ioc);
7255 _scsih_search_responding_raid_devices(ioc); 7260 _scsih_search_responding_raid_devices(ioc);
7256 _scsih_search_responding_expanders(ioc); 7261 _scsih_search_responding_expanders(ioc);
7257 if (!ioc->is_driver_loading) { 7262 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
7263 !ioc->sas_hba.num_phys)) {
7258 _scsih_prep_device_scan(ioc); 7264 _scsih_prep_device_scan(ioc);
7259 _scsih_search_responding_sas_devices(ioc); 7265 _scsih_search_responding_sas_devices(ioc);
7260 _scsih_search_responding_raid_devices(ioc); 7266 _scsih_search_responding_raid_devices(ioc);
@@ -7929,6 +7935,9 @@ _scsih_scan_start(struct Scsi_Host *shost)
7929 if (diag_buffer_enable != -1 && diag_buffer_enable != 0) 7935 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
7930 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable); 7936 mpt2sas_enable_diag_buffer(ioc, diag_buffer_enable);
7931 7937
7938 if (disable_discovery > 0)
7939 return;
7940
7932 ioc->start_scan = 1; 7941 ioc->start_scan = 1;
7933 rc = mpt2sas_port_enable(ioc); 7942 rc = mpt2sas_port_enable(ioc);
7934 7943
@@ -7950,6 +7959,12 @@ _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
7950{ 7959{
7951 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 7960 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
7952 7961
7962 if (disable_discovery > 0) {
7963 ioc->is_driver_loading = 0;
7964 ioc->wait_for_discovery_to_complete = 0;
7965 return 1;
7966 }
7967
7953 if (time >= (300 * HZ)) { 7968 if (time >= (300 * HZ)) {
7954 ioc->base_cmds.status = MPT2_CMD_NOT_USED; 7969 ioc->base_cmds.status = MPT2_CMD_NOT_USED;
7955 printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout " 7970 printk(MPT2SAS_INFO_FMT "port enable: FAILED with timeout "
@@ -8055,8 +8070,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8055 if (max_sectors != 0xFFFF) { 8070 if (max_sectors != 0xFFFF) {
8056 if (max_sectors < 64) { 8071 if (max_sectors < 64) {
8057 shost->max_sectors = 64; 8072 shost->max_sectors = 64;
8058 printk(MPT2SAS_WARN_FMT "Invalid value %d passed " 8073 printk(MPT2SAS_WARN_FMT "Invalid value %d passed "\
8059 "for max_sectors, range is 64 to 8192. Assigning " 8074 "for max_sectors, range is 64 to 32767. Assigning "\
8060 "value of 64.\n", ioc->name, max_sectors); 8075 "value of 64.\n", ioc->name, max_sectors);
8061 } else if (max_sectors > 32767) { 8076 } else if (max_sectors > 32767) {
8062 shost->max_sectors = 32767; 8077 shost->max_sectors = 32767;
@@ -8078,8 +8093,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8078 goto out_add_shost_fail; 8093 goto out_add_shost_fail;
8079 } 8094 }
8080 8095
8081 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 8096 /* register EEDP capabilities with SCSI layer */
8082 | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION); 8097 if (prot_mask)
8098 scsi_host_set_prot(shost, prot_mask);
8099 else
8100 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
8101 | SHOST_DIF_TYPE2_PROTECTION
8102 | SHOST_DIF_TYPE3_PROTECTION);
8103
8083 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 8104 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
8084 8105
8085 /* event thread */ 8106 /* event thread */
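[editor's note] prot_mask feeds scsi_host_set_prot() unmodified, so its bits are the SHOST_*_PROTECTION capability flags from <scsi/scsi_host.h>; the "def=7" in the parameter description matches the driver's fallback of DIF types 1, 2 and 3 seen above. Spelled out with the standard flag values:

	#include <scsi/scsi_host.h>

	/* The built-in default the module uses when prot_mask is left at 0:
	 * SHOST_DIF_TYPE1..3_PROTECTION are bits 0-2, so this equals 7. */
	static const unsigned int mpt2sas_default_prot_sketch =
		SHOST_DIF_TYPE1_PROTECTION |
		SHOST_DIF_TYPE2_PROTECTION |
		SHOST_DIF_TYPE3_PROTECTION;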
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index c6cf20f6072..8c2ffbe6af0 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers 2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
5 * Copyright (C) 2007-2010 LSI Corporation 5 * Copyright (C) 2007-2012 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 4539d59a085..a3776d6ced6 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
1629 mv_dprintk("mvs_abort_task() mvi=%p task=%p " 1629 mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1630 "slot=%p slot_idx=x%x\n", 1630 "slot=%p slot_idx=x%x\n",
1631 mvi, task, slot, slot_idx); 1631 mvi, task, slot, slot_idx);
1632 mvs_tmf_timedout((unsigned long)task); 1632 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1633 mvs_slot_task_free(mvi, task, slot, slot_idx); 1633 mvs_slot_task_free(mvi, task, slot, slot_idx);
1634 rc = TMF_RESP_FUNC_COMPLETE; 1634 rc = TMF_RESP_FUNC_COMPLETE;
1635 goto out; 1635 goto out;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5ab953029f8..1c28215f8be 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -26,7 +26,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
26 struct qla_hw_data *ha = vha->hw; 26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0; 27 int rval = 0;
28 28
29 if (ha->fw_dump_reading == 0) 29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0; 30 return 0;
31 31
32 if (IS_QLA82XX(ha)) { 32 if (IS_QLA82XX(ha)) {
@@ -39,9 +39,14 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
39 rval = memory_read_from_buffer(buf, count, 39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size); 40 &off, ha->md_dump, ha->md_dump_size);
41 return rval; 41 return rval;
42 } else 42 } else if (ha->mctp_dumped && ha->mctp_dump_reading)
43 return memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 else if (ha->fw_dump_reading)
43 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 46 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
44 ha->fw_dump_len); 47 ha->fw_dump_len);
48 else
49 return 0;
45} 50}
46 51
47static ssize_t 52static ssize_t
@@ -107,6 +112,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
107 if (IS_QLA82XX(ha)) 112 if (IS_QLA82XX(ha))
108 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 113 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
109 break; 114 break;
115 case 6:
116 if (!ha->mctp_dump_reading)
117 break;
118 ql_log(ql_log_info, vha, 0x70c1,
119 "MCTP dump cleared on (%ld).\n", vha->host_no);
120 ha->mctp_dump_reading = 0;
121 ha->mctp_dumped = 0;
122 break;
123 case 7:
124 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
125 ha->mctp_dump_reading = 1;
126 ql_log(ql_log_info, vha, 0x70c2,
127 "Raw mctp dump ready for read on (%ld).\n",
128 vha->host_no);
129 }
130 break;
110 } 131 }
111 return count; 132 return count;
112} 133}
@@ -564,6 +585,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
564 struct qla_hw_data *ha = vha->hw; 585 struct qla_hw_data *ha = vha->hw;
565 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 586 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
566 int type; 587 int type;
588 uint32_t idc_control;
567 589
568 if (off != 0) 590 if (off != 0)
569 return -EINVAL; 591 return -EINVAL;
@@ -587,22 +609,36 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
587 scsi_unblock_requests(vha->host); 609 scsi_unblock_requests(vha->host);
588 break; 610 break;
589 case 0x2025d: 611 case 0x2025d:
590 if (!IS_QLA81XX(ha) || !IS_QLA8031(ha)) 612 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
591 return -EPERM; 613 return -EPERM;
592 614
593 ql_log(ql_log_info, vha, 0x706f, 615 ql_log(ql_log_info, vha, 0x706f,
594 "Issuing MPI reset.\n"); 616 "Issuing MPI reset.\n");
595 617
596 /* Make sure FC side is not in reset */ 618 if (IS_QLA83XX(ha)) {
597 qla2x00_wait_for_hba_online(vha); 619 uint32_t idc_control;
598 620
599 /* Issue MPI reset */ 621 qla83xx_idc_lock(vha, 0);
600 scsi_block_requests(vha->host); 622 __qla83xx_get_idc_control(vha, &idc_control);
601 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS) 623 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
602 ql_log(ql_log_warn, vha, 0x7070, 624 __qla83xx_set_idc_control(vha, idc_control);
603 "MPI reset failed.\n"); 625 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
604 scsi_unblock_requests(vha->host); 626 QLA8XXX_DEV_NEED_RESET);
605 break; 627 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
628 qla83xx_idc_unlock(vha, 0);
629 break;
630 } else {
631 /* Make sure FC side is not in reset */
632 qla2x00_wait_for_hba_online(vha);
633
634 /* Issue MPI reset */
635 scsi_block_requests(vha->host);
636 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
637 ql_log(ql_log_warn, vha, 0x7070,
638 "MPI reset failed.\n");
639 scsi_unblock_requests(vha->host);
640 break;
641 }
606 case 0x2025e: 642 case 0x2025e:
607 if (!IS_QLA82XX(ha) || vha != base_vha) { 643 if (!IS_QLA82XX(ha) || vha != base_vha) {
608 ql_log(ql_log_info, vha, 0x7071, 644 ql_log(ql_log_info, vha, 0x7071,
@@ -616,6 +652,29 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
616 qla2xxx_wake_dpc(vha); 652 qla2xxx_wake_dpc(vha);
617 qla2x00_wait_for_fcoe_ctx_reset(vha); 653 qla2x00_wait_for_fcoe_ctx_reset(vha);
618 break; 654 break;
655 case 0x2025f:
656 if (!IS_QLA8031(ha))
657 return -EPERM;
658 ql_log(ql_log_info, vha, 0x70bc,
659 "Disabling Reset by IDC control\n");
660 qla83xx_idc_lock(vha, 0);
661 __qla83xx_get_idc_control(vha, &idc_control);
662 idc_control |= QLA83XX_IDC_RESET_DISABLED;
663 __qla83xx_set_idc_control(vha, idc_control);
664 qla83xx_idc_unlock(vha, 0);
665 break;
666 case 0x20260:
667 if (!IS_QLA8031(ha))
668 return -EPERM;
669 ql_log(ql_log_info, vha, 0x70bd,
670 "Enabling Reset by IDC control\n");
671 qla83xx_idc_lock(vha, 0);
672 __qla83xx_get_idc_control(vha, &idc_control);
673 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
674 __qla83xx_set_idc_control(vha, idc_control);
675 qla83xx_idc_unlock(vha, 0);
676 break;
677
619 } 678 }
620 return count; 679 return count;
621} 680}
@@ -1251,6 +1310,49 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1251 state[1], state[2], state[3], state[4]); 1310 state[1], state[2], state[3], state[4]);
1252} 1311}
1253 1312
1313static ssize_t
1314qla2x00_diag_requests_show(struct device *dev,
1315 struct device_attribute *attr, char *buf)
1316{
1317 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1318
1319 if (!IS_BIDI_CAPABLE(vha->hw))
1320 return snprintf(buf, PAGE_SIZE, "\n");
1321
1322 return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1323}
1324
1325static ssize_t
1326qla2x00_diag_megabytes_show(struct device *dev,
1327 struct device_attribute *attr, char *buf)
1328{
1329 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1330
1331 if (!IS_BIDI_CAPABLE(vha->hw))
1332 return snprintf(buf, PAGE_SIZE, "\n");
1333
1334 return snprintf(buf, PAGE_SIZE, "%llu\n",
1335 vha->bidi_stats.transfer_bytes >> 20);
1336}
1337
1338static ssize_t
1339qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1340 char *buf)
1341{
1342 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1343 struct qla_hw_data *ha = vha->hw;
1344 uint32_t size;
1345
1346 if (!ha->fw_dumped)
1347 size = 0;
1348 else if (IS_QLA82XX(ha))
1349 size = ha->md_template_size + ha->md_dump_size;
1350 else
1351 size = ha->fw_dump_len;
1352
1353 return snprintf(buf, PAGE_SIZE, "%d\n", size);
1354}
1355
1254static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1356static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1255static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1357static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1256static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1358static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1289,6 +1391,9 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1289static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); 1391static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1290static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); 1392static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1291static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); 1393static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1394static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
1395static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
1396static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
1292 1397
1293struct device_attribute *qla2x00_host_attrs[] = { 1398struct device_attribute *qla2x00_host_attrs[] = {
1294 &dev_attr_driver_version, 1399 &dev_attr_driver_version,
@@ -1318,6 +1423,9 @@ struct device_attribute *qla2x00_host_attrs[] = {
1318 &dev_attr_fw_state, 1423 &dev_attr_fw_state,
1319 &dev_attr_optrom_gold_fw_version, 1424 &dev_attr_optrom_gold_fw_version,
1320 &dev_attr_thermal_temp, 1425 &dev_attr_thermal_temp,
1426 &dev_attr_diag_requests,
1427 &dev_attr_diag_megabytes,
1428 &dev_attr_fw_dump_size,
1321 NULL, 1429 NULL,
1322}; 1430};
1323 1431
@@ -1704,7 +1812,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1704 1812
1705 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 1813 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1706 if (ha->fw_attributes & BIT_4) { 1814 if (ha->fw_attributes & BIT_4) {
1707 int prot = 0; 1815 int prot = 0, guard;
1708 vha->flags.difdix_supported = 1; 1816 vha->flags.difdix_supported = 1;
1709 ql_dbg(ql_dbg_user, vha, 0x7082, 1817 ql_dbg(ql_dbg_user, vha, 0x7082,
1710 "Registered for DIF/DIX type 1 and 3 protection.\n"); 1818 "Registered for DIF/DIX type 1 and 3 protection.\n");
@@ -1717,7 +1825,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1717 | SHOST_DIX_TYPE1_PROTECTION 1825 | SHOST_DIX_TYPE1_PROTECTION
1718 | SHOST_DIX_TYPE2_PROTECTION 1826 | SHOST_DIX_TYPE2_PROTECTION
1719 | SHOST_DIX_TYPE3_PROTECTION); 1827 | SHOST_DIX_TYPE3_PROTECTION);
1720 scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC); 1828
1829 guard = SHOST_DIX_GUARD_CRC;
1830
1831 if (IS_PI_IPGUARD_CAPABLE(ha) &&
1832 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
1833 guard |= SHOST_DIX_GUARD_IP;
1834
1835 scsi_host_set_guard(vha->host, guard);
1721 } else 1836 } else
1722 vha->flags.difdix_supported = 0; 1837 vha->flags.difdix_supported = 0;
1723 } 1838 }
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c68883806c5..2f9bddd3c61 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -530,13 +530,13 @@ done_unmap_sg:
530done: 530done:
531 return rval; 531 return rval;
532} 532}
533 533/*
534/* Set the port configuration to enable the 534 * Set the port configuration to enable the internal or external loopback
535 * internal loopback on ISP81XX 535 * depending on the loopback mode.
536 */ 536 */
537static inline int 537static inline int
538qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, 538qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
539 uint16_t *new_config) 539 uint16_t *new_config, uint16_t mode)
540{ 540{
541 int ret = 0; 541 int ret = 0;
542 int rval = 0; 542 int rval = 0;
@@ -545,8 +545,14 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
546 goto done_set_internal; 546 goto done_set_internal;
547 547
548 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 548 if (mode == INTERNAL_LOOPBACK)
549 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 549 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
550 else if (mode == EXTERNAL_LOOPBACK)
551 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
552 ql_dbg(ql_dbg_user, vha, 0x70be,
553 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
554
555 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
550 556
551 ha->notify_dcbx_comp = 1; 557 ha->notify_dcbx_comp = 1;
552 ret = qla81xx_set_port_config(vha, new_config); 558 ret = qla81xx_set_port_config(vha, new_config);
@@ -562,9 +568,17 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
562 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 568 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
563 ql_dbg(ql_dbg_user, vha, 0x7022, 569 ql_dbg(ql_dbg_user, vha, 0x7022,
564 "State change notification not received.\n"); 570 "State change notification not received.\n");
565 } else 571 rval = -EINVAL;
566 ql_dbg(ql_dbg_user, vha, 0x7023, 572 } else {
567 "State change received.\n"); 573 if (ha->flags.idc_compl_status) {
574 ql_dbg(ql_dbg_user, vha, 0x70c3,
575 "Bad status in IDC Completion AEN\n");
576 rval = -EINVAL;
577 ha->flags.idc_compl_status = 0;
578 } else
579 ql_dbg(ql_dbg_user, vha, 0x7023,
580 "State change received.\n");
581 }
568 582
569 ha->notify_dcbx_comp = 0; 583 ha->notify_dcbx_comp = 0;
570 584
@@ -572,11 +586,9 @@ done_set_internal:
572 return rval; 586 return rval;
573} 587}
574 588
575/* Set the port configuration to disable the 589/* Disable loopback mode */
576 * internal loopback on ISP81XX
577 */
578static inline int 590static inline int
579qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config, 591qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
580 int wait) 592 int wait)
581{ 593{
582 int ret = 0; 594 int ret = 0;
@@ -589,8 +601,12 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
589 601
590 memset(new_config, 0 , sizeof(new_config)); 602 memset(new_config, 0 , sizeof(new_config));
591 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 603 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
592 ENABLE_INTERNAL_LOOPBACK) { 604 ENABLE_INTERNAL_LOOPBACK ||
605 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
606 ENABLE_EXTERNAL_LOOPBACK) {
593 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; 607 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
608 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
609 (new_config[0] & INTERNAL_LOOPBACK_MASK));
594 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 610 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
595 611
596 ha->notify_dcbx_comp = wait; 612 ha->notify_dcbx_comp = wait;
@@ -707,7 +723,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
707 723
708 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 724 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
709 725
710 if ((ha->current_topology == ISP_CFG_F || 726 if (atomic_read(&vha->loop_state) == LOOP_READY &&
727 (ha->current_topology == ISP_CFG_F ||
711 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && 728 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
712 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 729 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
713 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 730 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -729,30 +746,24 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
729 goto done_free_dma_req; 746 goto done_free_dma_req;
730 } 747 }
731 748
732 if (elreq.options != EXTERNAL_LOOPBACK) { 749 ql_dbg(ql_dbg_user, vha, 0x70c0,
733 ql_dbg(ql_dbg_user, vha, 0x7020, 750 "elreq.options=%04x\n", elreq.options);
734 "Internal: current port config = %x\n", 751
735 config[0]); 752 if (elreq.options == EXTERNAL_LOOPBACK)
736 if (qla81xx_set_internal_loopback(vha, config, 753 if (IS_QLA8031(ha))
737 new_config)) { 754 rval = qla81xx_set_loopback_mode(vha,
738 ql_log(ql_log_warn, vha, 0x7024, 755 config, new_config, elreq.options);
739 "Internal loopback failed.\n"); 756 else
740 bsg_job->reply->result = 757 rval = qla81xx_reset_loopback_mode(vha,
741 (DID_ERROR << 16); 758 config, 1);
742 rval = -EPERM; 759 else
743 goto done_free_dma_req; 760 rval = qla81xx_set_loopback_mode(vha, config,
744 } 761 new_config, elreq.options);
745 } else { 762
746 /* For external loopback to work 763 if (rval) {
747 * ensure internal loopback is disabled 764 bsg_job->reply->result = (DID_ERROR << 16);
748 */ 765 rval = -EPERM;
749 if (qla81xx_reset_internal_loopback(vha, 766 goto done_free_dma_req;
750 config, 1)) {
751 bsg_job->reply->result =
752 (DID_ERROR << 16);
753 rval = -EPERM;
754 goto done_free_dma_req;
755 }
756 } 767 }
757 768
758 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 769 type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -766,7 +777,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
766 /* Revert back to original port config 777 /* Revert back to original port config
767 * Also clear internal loopback 778 * Also clear internal loopback
768 */ 779 */
769 qla81xx_reset_internal_loopback(vha, 780 qla81xx_reset_loopback_mode(vha,
770 new_config, 0); 781 new_config, 0);
771 } 782 }
772 783
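The reworked dispatch in the hunk above reduces to a two-way decision; the snippet below is only a restatement of that hunk for readability, not new driver code:

	/* Restating the dispatch above: only external loopback on a
	 * non-ISP8031 part clears the configured mode; every other
	 * combination sets the requested mode. */
	if (elreq.options == EXTERNAL_LOOPBACK && !IS_QLA8031(ha))
		rval = qla81xx_reset_loopback_mode(vha, config, 1);
	else
		rval = qla81xx_set_loopback_mode(vha, config,
		    new_config, elreq.options);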
@@ -1364,7 +1375,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1364 struct qla_hw_data *ha = vha->hw; 1375 struct qla_hw_data *ha = vha->hw;
1365 int rval = 0; 1376 int rval = 0;
1366 1377
1367 if (ha->flags.isp82xx_reset_hdlr_active) 1378 if (ha->flags.nic_core_reset_hdlr_active)
1368 return -EBUSY; 1379 return -EBUSY;
1369 1380
1370 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1381 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
@@ -1560,6 +1571,276 @@ done:
1560} 1571}
1561 1572
1562static int 1573static int
1574qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1575{
1576 struct Scsi_Host *host = bsg_job->shost;
1577 scsi_qla_host_t *vha = shost_priv(host);
1578 struct qla_hw_data *ha = vha->hw;
1579 int rval = 0;
1580 uint8_t bsg[DMA_POOL_SIZE];
1581 struct qla_i2c_access *i2c = (void *)bsg;
1582 dma_addr_t sfp_dma;
1583 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1584 if (!sfp) {
1585 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1586 EXT_STATUS_NO_MEMORY;
1587 goto done;
1588 }
1589
1590 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1591 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1592
1593 memcpy(sfp, i2c->buffer, i2c->length);
1594 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1595 i2c->device, i2c->offset, i2c->length, i2c->option);
1596
1597 if (rval) {
1598 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1599 EXT_STATUS_MAILBOX;
1600 goto dealloc;
1601 }
1602
1603 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1604
1605dealloc:
1606 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1607
1608done:
1609 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1610 bsg_job->reply->result = DID_OK << 16;
1611 bsg_job->job_done(bsg_job);
1612
1613 return 0;
1614}
1615
1616static int
1617qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1618{
1619 struct Scsi_Host *host = bsg_job->shost;
1620 scsi_qla_host_t *vha = shost_priv(host);
1621 struct qla_hw_data *ha = vha->hw;
1622 int rval = 0;
1623 uint8_t bsg[DMA_POOL_SIZE];
1624 struct qla_i2c_access *i2c = (void *)bsg;
1625 dma_addr_t sfp_dma;
1626 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1627 if (!sfp) {
1628 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1629 EXT_STATUS_NO_MEMORY;
1630 goto done;
1631 }
1632
1633 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1634 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1635
1636 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1637 i2c->device, i2c->offset, i2c->length, i2c->option);
1638
1639 if (rval) {
1640 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1641 EXT_STATUS_MAILBOX;
1642 goto dealloc;
1643 }
1644
1645 memcpy(i2c->buffer, sfp, i2c->length);
1646 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1647 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1648
1649 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1650
1651dealloc:
1652 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1653
1654done:
1655 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1656 bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
1657 bsg_job->reply->result = DID_OK << 16;
1658 bsg_job->job_done(bsg_job);
1659
1660 return 0;
1661}
1662
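For orientation, both I2C handlers above are reached through the FC BSG vendor path. The sketch below is hypothetical userspace code, not part of this patch, showing roughly how QL_VND_READ_I2C could be issued through /dev/bsg/fc_hostN via the sg v4 ioctl; struct qla_i2c_access and the QL_VND_* values are the ones added to qla_bsg.h later in this patch, the BSG structures come from <linux/bsg.h> and <scsi/scsi_bsg_fc.h>, and error handling is omitted.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>
	#include <scsi/scsi_bsg_fc.h>

	/* Hypothetical sketch: read 0x40 bytes from an SFP-style device
	 * at the assumed I2C address 0xA0. */
	static int read_i2c(int bsg_fd, struct qla_i2c_access *i2c)
	{
		struct fc_bsg_request req;
		struct fc_bsg_reply rep;
		struct sg_io_v4 io;

		memset(&req, 0, sizeof(req));
		req.msgcode = FC_BSG_HST_VENDOR;
		req.rqst_data.h_vendor.vendor_cmd[0] = QL_VND_READ_I2C;

		memset(i2c, 0, sizeof(*i2c));
		i2c->device = 0xA0;	/* assumed device address */
		i2c->length = 0x40;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)&req;
		io.request_len = sizeof(req);
		io.response = (uintptr_t)&rep;
		io.max_response_len = sizeof(rep);
		io.dout_xferp = (uintptr_t)i2c;	/* request payload */
		io.dout_xfer_len = sizeof(*i2c);
		io.din_xferp = (uintptr_t)i2c;	/* reply payload; buffer
						 * filled in by the driver */
		io.din_xfer_len = sizeof(*i2c);

		return ioctl(bsg_fd, SG_IO, &io);
	}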
1663static int
1664qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
1665{
1666 struct Scsi_Host *host = bsg_job->shost;
1667 scsi_qla_host_t *vha = shost_priv(host);
1668 struct qla_hw_data *ha = vha->hw;
1669 uint16_t thread_id;
1670 uint32_t rval = EXT_STATUS_OK;
1671 uint16_t req_sg_cnt = 0;
1672 uint16_t rsp_sg_cnt = 0;
1673 uint16_t nextlid = 0;
1674 uint32_t tot_dsds;
1675 srb_t *sp = NULL;
1676 uint32_t req_data_len = 0;
1677 uint32_t rsp_data_len = 0;
1678
1679 /* Check the type of the adapter */
1680 if (!IS_BIDI_CAPABLE(ha)) {
1681 ql_log(ql_log_warn, vha, 0x70a0,
1682 "This adapter is not supported\n");
1683 rval = EXT_STATUS_NOT_SUPPORTED;
1684 goto done;
1685 }
1686
1687 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1688 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1689 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1690 rval = EXT_STATUS_BUSY;
1691 goto done;
1692 }
1693
1694 /* Check if host is online */
1695 if (!vha->flags.online) {
1696 ql_log(ql_log_warn, vha, 0x70a1,
1697 "Host is not online\n");
1698 rval = EXT_STATUS_DEVICE_OFFLINE;
1699 goto done;
1700 }
1701
1702 /* Check if cable is plugged in or not */
1703 if (vha->device_flags & DFLG_NO_CABLE) {
1704 ql_log(ql_log_warn, vha, 0x70a2,
1705 "Cable is unplugged...\n");
1706 rval = EXT_STATUS_INVALID_CFG;
1707 goto done;
1708 }
1709
1710 /* Check if the switch is connected or not */
1711 if (ha->current_topology != ISP_CFG_F) {
1712 ql_log(ql_log_warn, vha, 0x70a3,
1713 "Host is not connected to the switch\n");
1714 rval = EXT_STATUS_INVALID_CFG;
1715 goto done;
1716 }
1717
1718 /* Check if operating mode is P2P */
1719 if (ha->operating_mode != P2P) {
1720 ql_log(ql_log_warn, vha, 0x70a4,
1721 "Host is operating mode is not P2p\n");
1722 rval = EXT_STATUS_INVALID_CFG;
1723 goto done;
1724 }
1725
1726 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1727
1728 mutex_lock(&ha->selflogin_lock);
1729 if (vha->self_login_loop_id == 0) {
1730 /* Initialize all required fields of fcport */
1731 vha->bidir_fcport.vha = vha;
1732 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1733 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1734 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1735 vha->bidir_fcport.loop_id = vha->loop_id;
1736
1737 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1738 ql_log(ql_log_warn, vha, 0x70a7,
1739 "Failed to login port %06X for bidirectional IOCB\n",
1740 vha->bidir_fcport.d_id.b24);
1741 mutex_unlock(&ha->selflogin_lock);
1742 rval = EXT_STATUS_MAILBOX;
1743 goto done;
1744 }
1745 vha->self_login_loop_id = nextlid - 1;
1746
1747 }
1748 /* Assign the self login loop id to fcport */
1749 mutex_unlock(&ha->selflogin_lock);
1750
1751 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1752
1753 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1754 bsg_job->request_payload.sg_list,
1755 bsg_job->request_payload.sg_cnt,
1756 DMA_TO_DEVICE);
1757
1758 if (!req_sg_cnt) {
1759 rval = EXT_STATUS_NO_MEMORY;
1760 goto done;
1761 }
1762
1763 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1764 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1765 DMA_FROM_DEVICE);
1766
1767 if (!rsp_sg_cnt) {
1768 rval = EXT_STATUS_NO_MEMORY;
1769 goto done_unmap_req_sg;
1770 }
1771
1772 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1773 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1774 ql_dbg(ql_dbg_user, vha, 0x70a9,
1775 "Dma mapping resulted in different sg counts "
1776 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1777 "%x dma_reply_sg_cnt: %x]\n",
1778 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1779 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1780 rval = EXT_STATUS_NO_MEMORY;
1781 goto done_unmap_sg;
1782 }
1783
 1784 	req_data_len = bsg_job->request_payload.payload_len;
 1785 	rsp_data_len = bsg_job->reply_payload.payload_len;
 1786
 1787 	if (req_data_len != rsp_data_len) {
 1788 		rval = EXT_STATUS_BUSY;
 1789 		ql_log(ql_log_warn, vha, 0x70aa,
 1790 		    "req_data_len != rsp_data_len\n");
 1791 		goto done_unmap_sg;
 1792 	}
1793
1794
1795 /* Alloc SRB structure */
1796 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1797 if (!sp) {
1798 ql_dbg(ql_dbg_user, vha, 0x70ac,
1799 "Alloc SRB structure failed\n");
1800 rval = EXT_STATUS_NO_MEMORY;
1801 goto done_unmap_sg;
1802 }
1803
 1804 	/* Populate srb->ctx with bidir ctx */
1805 sp->u.bsg_job = bsg_job;
1806 sp->free = qla2x00_bsg_sp_free;
1807 sp->type = SRB_BIDI_CMD;
1808 sp->done = qla2x00_bsg_job_done;
1809
1810 /* Add the read and write sg count */
1811 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1812
1813 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1814 if (rval != EXT_STATUS_OK)
1815 goto done_free_srb;
1816 /* the bsg request will be completed in the interrupt handler */
1817 return rval;
1818
1819done_free_srb:
1820 mempool_free(sp, ha->srb_mempool);
1821done_unmap_sg:
1822 dma_unmap_sg(&ha->pdev->dev,
1823 bsg_job->reply_payload.sg_list,
1824 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1825done_unmap_req_sg:
1826 dma_unmap_sg(&ha->pdev->dev,
1827 bsg_job->request_payload.sg_list,
1828 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1829done:
1830
1831 /* Return an error vendor specific response
1832 * and complete the bsg request
1833 */
1834 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1835 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1836 bsg_job->reply->reply_payload_rcv_len = 0;
1837 bsg_job->reply->result = (DID_OK) << 16;
1838 bsg_job->job_done(bsg_job);
 1839 	/* Always return success; the vendor rsp carries the actual status */
1840 return 0;
1841}
1842
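Note the completion convention above: the job always finishes with DID_OK at the transport level and the real status travels in vendor_rsp[0]. A caller-side sketch, hypothetical userspace code continuing the earlier sg_io_v4 example (<stdio.h> assumed; the reply buffer needs room past the fixed fc_bsg_reply header because vendor_rsp[] is a flexible array):

	uint8_t rep_buf[sizeof(struct fc_bsg_reply) + 4 * sizeof(uint32_t)];
	struct fc_bsg_reply *rep = (struct fc_bsg_reply *)rep_buf;

	/* ... rep_buf handed to sg_io_v4.response, ioctl returned 0 ... */
	uint32_t ext_status = rep->reply_data.vendor_reply.vendor_rsp[0];

	if (rep->result == 0 && ext_status != EXT_STATUS_OK)
		fprintf(stderr, "bidir diag failed, ext status %u\n",
		    ext_status);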
1843static int
1563qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1844qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1564{ 1845{
1565 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 1846 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1596,6 +1877,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1596 case QL_VND_WRITE_FRU_STATUS: 1877 case QL_VND_WRITE_FRU_STATUS:
1597 return qla2x00_write_fru_status(bsg_job); 1878 return qla2x00_write_fru_status(bsg_job);
1598 1879
1880 case QL_VND_WRITE_I2C:
1881 return qla2x00_write_i2c(bsg_job);
1882
1883 case QL_VND_READ_I2C:
1884 return qla2x00_read_i2c(bsg_job);
1885
1886 case QL_VND_DIAG_IO_CMD:
1887 return qla24xx_process_bidir_cmd(bsg_job);
1888
1599 default: 1889 default:
1600 bsg_job->reply->result = (DID_ERROR << 16); 1890 bsg_job->reply->result = (DID_ERROR << 16);
1601 bsg_job->job_done(bsg_job); 1891 bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 70caa63a893..37b8b7ba742 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -19,21 +19,41 @@
19#define QL_VND_SET_FRU_VERSION 0x0B 19#define QL_VND_SET_FRU_VERSION 0x0B
20#define QL_VND_READ_FRU_STATUS 0x0C 20#define QL_VND_READ_FRU_STATUS 0x0C
21#define QL_VND_WRITE_FRU_STATUS 0x0D 21#define QL_VND_WRITE_FRU_STATUS 0x0D
22#define QL_VND_DIAG_IO_CMD 0x0A
23#define QL_VND_WRITE_I2C 0x10
24#define QL_VND_READ_I2C 0x11
22 25
23/* BSG Vendor specific subcode returns */ 26/* BSG Vendor specific subcode returns */
24#define EXT_STATUS_OK 0 27#define EXT_STATUS_OK 0
25#define EXT_STATUS_ERR 1 28#define EXT_STATUS_ERR 1
29#define EXT_STATUS_BUSY 2
26#define EXT_STATUS_INVALID_PARAM 6 30#define EXT_STATUS_INVALID_PARAM 6
31#define EXT_STATUS_DATA_OVERRUN 7
32#define EXT_STATUS_DATA_UNDERRUN 8
27#define EXT_STATUS_MAILBOX 11 33#define EXT_STATUS_MAILBOX 11
28#define EXT_STATUS_NO_MEMORY 17 34#define EXT_STATUS_NO_MEMORY 17
35#define EXT_STATUS_DEVICE_OFFLINE 22
36
37/*
38 * To support bidirectional iocb
39 * BSG Vendor specific returns
40 */
41#define EXT_STATUS_NOT_SUPPORTED 27
42#define EXT_STATUS_INVALID_CFG 28
43#define EXT_STATUS_DMA_ERR 29
44#define EXT_STATUS_TIMEOUT 30
45#define EXT_STATUS_THREAD_FAILED 31
46#define EXT_STATUS_DATA_CMP_FAILED 32
29 47
 30/* BSG definitions for interpreting CommandSent field */ 48/* BSG definitions for interpreting CommandSent field */
31#define INT_DEF_LB_LOOPBACK_CMD 0 49#define INT_DEF_LB_LOOPBACK_CMD 0
32#define INT_DEF_LB_ECHO_CMD 1 50#define INT_DEF_LB_ECHO_CMD 1
33 51
 34/* Loopback related definitions */ 52/* Loopback related definitions */
53#define INTERNAL_LOOPBACK 0xF1
35#define EXTERNAL_LOOPBACK 0xF2 54#define EXTERNAL_LOOPBACK 0xF2
36#define ENABLE_INTERNAL_LOOPBACK 0x02 55#define ENABLE_INTERNAL_LOOPBACK 0x02
56#define ENABLE_EXTERNAL_LOOPBACK 0x04
37#define INTERNAL_LOOPBACK_MASK 0x000E 57#define INTERNAL_LOOPBACK_MASK 0x000E
38#define MAX_ELS_FRAME_PAYLOAD 252 58#define MAX_ELS_FRAME_PAYLOAD 252
39#define ELS_OPCODE_BYTE 0x10 59#define ELS_OPCODE_BYTE 0x10
@@ -183,4 +203,12 @@ struct qla_status_reg {
183 uint8_t reserved[7]; 203 uint8_t reserved[7];
184} __packed; 204} __packed;
185 205
206struct qla_i2c_access {
207 uint16_t device;
208 uint16_t offset;
209 uint16_t option;
210 uint16_t length;
211 uint8_t buffer[0x40];
212} __packed;
213
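A small sanity sketch, an assumption rather than part of the patch: packed as above, the block is 4 * 2 + 0x40 = 0x48 bytes, so a compile-time check placed in any driver function body could read:

	/* Hypothetical check; sizes follow from the __packed layout above. */
	BUILD_BUG_ON(sizeof(struct qla_i2c_access) != 0x48);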
186#endif 214#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index fdee5611f3e..44efe3cc79e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -11,26 +11,31 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x0124 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x1140 | 0x111a-0x111b | 15 * | Mailbox commands | 0x114f | 0x111a-0x111b |
16 * | | | 0x112c-0x112e | 16 * | | | 0x112c-0x112e |
17 * | | | 0x113a | 17 * | | | 0x113a |
18 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 18 * | Device Discovery | 0x2087 | 0x2020-0x2022, |
19 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | 19 * | | | 0x2016 |
20 * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
21 * | | | 0x3027-0x3028 |
20 * | | | 0x302d-0x302e | 22 * | | | 0x302d-0x302e |
21 * | DPC Thread | 0x401c | 0x4002,0x4013 | 23 * | DPC Thread | 0x401d | 0x4002,0x4013 |
22 * | Async Events | 0x505f | 0x502b-0x502f | 24 * | Async Events | 0x5071 | 0x502b-0x502f |
23 * | | | 0x5047,0x5052 | 25 * | | | 0x5047,0x5052 |
24 * | Timer Routines | 0x6011 | | 26 * | Timer Routines | 0x6011 | |
25 * | User Space Interactions | 0x709f | 0x7018,0x702e, | 27 * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
26 * | | | 0x7039,0x7045, | 28 * | | | 0x7039,0x7045, |
27 * | | | 0x7073-0x7075, | 29 * | | | 0x7073-0x7075, |
28 * | | | 0x708c | 30 * | | | 0x708c, |
31 * | | | 0x70a5,0x70a6, |
32 * | | | 0x70a8,0x70ab, |
33 * | | | 0x70ad-0x70ae |
29 * | Task Management | 0x803c | 0x8025-0x8026 | 34 * | Task Management | 0x803c | 0x8025-0x8026 |
30 * | | | 0x800b,0x8039 | 35 * | | | 0x800b,0x8039 |
31 * | AER/EEH | 0x9011 | | 36 * | AER/EEH | 0x9011 | |
32 * | Virtual Port | 0xa007 | | 37 * | Virtual Port | 0xa007 | |
33 * | ISP82XX Specific | 0xb054 | 0xb024 | 38 * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
34 * | MultiQ | 0xc00c | | 39 * | MultiQ | 0xc00c | |
35 * | Misc | 0xd010 | | 40 * | Misc | 0xd010 | |
36 * | Target Mode | 0xe06f | | 41 * | Target Mode | 0xe06f | |
@@ -2357,7 +2362,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2357 2362
2358/* 2363/*
2359 * This function is for formatting and logging debug information. 2364 * This function is for formatting and logging debug information.
2360 * It is to be used when vha is not available and pci is availble, 2365 * It is to be used when vha is not available and pci is available,
2361 * i.e., before host allocation. It formats the message and logs it 2366 * i.e., before host allocation. It formats the message and logs it
2362 * to the messages file. 2367 * to the messages file.
2363 * parameters: 2368 * parameters:
@@ -2452,7 +2457,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2452 2457
2453/* 2458/*
2454 * This function is for formatting and logging log messages. 2459 * This function is for formatting and logging log messages.
2455 * It is to be used when vha is not available and pci is availble, 2460 * It is to be used when vha is not available and pci is available,
2456 * i.e., before host allocation. It formats the message and logs 2461 * i.e., before host allocation. It formats the message and logs
2457 * it to the messages file. All the messages are logged irrespective 2462 * it to the messages file. All the messages are logged irrespective
2458 * of the value of ql2xextended_error_logging. 2463 * of the value of ql2xextended_error_logging.
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f278df8cce0..8f911c0b1e7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 39007f53aec..a9725bf5527 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -115,6 +115,82 @@
115#define WRT_REG_DWORD(addr, data) writel(data,addr) 115#define WRT_REG_DWORD(addr, data) writel(data,addr)
116 116
117/* 117/*
118 * ISP83XX specific remote register addresses
119 */
120#define QLA83XX_LED_PORT0 0x00201320
121#define QLA83XX_LED_PORT1 0x00201328
122#define QLA83XX_IDC_DEV_STATE 0x22102384
123#define QLA83XX_IDC_MAJOR_VERSION 0x22102380
124#define QLA83XX_IDC_MINOR_VERSION 0x22102398
125#define QLA83XX_IDC_DRV_PRESENCE 0x22102388
126#define QLA83XX_IDC_DRIVER_ACK 0x2210238c
127#define QLA83XX_IDC_CONTROL 0x22102390
128#define QLA83XX_IDC_AUDIT 0x22102394
129#define QLA83XX_IDC_LOCK_RECOVERY 0x2210239c
130#define QLA83XX_DRIVER_LOCKID 0x22102104
131#define QLA83XX_DRIVER_LOCK 0x8111c028
132#define QLA83XX_DRIVER_UNLOCK 0x8111c02c
133#define QLA83XX_FLASH_LOCKID 0x22102100
134#define QLA83XX_FLASH_LOCK 0x8111c010
135#define QLA83XX_FLASH_UNLOCK 0x8111c014
136#define QLA83XX_DEV_PARTINFO1 0x221023e0
137#define QLA83XX_DEV_PARTINFO2 0x221023e4
138#define QLA83XX_FW_HEARTBEAT 0x221020b0
139#define QLA83XX_PEG_HALT_STATUS1 0x221020a8
140#define QLA83XX_PEG_HALT_STATUS2 0x221020ac
141
142/* 83XX: Macros defining 8200 AEN Reason codes */
143#define IDC_DEVICE_STATE_CHANGE BIT_0
144#define IDC_PEG_HALT_STATUS_CHANGE BIT_1
145#define IDC_NIC_FW_REPORTED_FAILURE BIT_2
146#define IDC_HEARTBEAT_FAILURE BIT_3
147
148/* 83XX: Macros defining 8200 AEN Error-levels */
149#define ERR_LEVEL_NON_FATAL 0x1
150#define ERR_LEVEL_RECOVERABLE_FATAL 0x2
151#define ERR_LEVEL_UNRECOVERABLE_FATAL 0x4
152
153/* 83XX: Macros for IDC Version */
154#define QLA83XX_SUPP_IDC_MAJOR_VERSION 0x01
155#define QLA83XX_SUPP_IDC_MINOR_VERSION 0x0
156
157/* 83XX: Macros for scheduling dpc tasks */
158#define QLA83XX_NIC_CORE_RESET 0x1
159#define QLA83XX_IDC_STATE_HANDLER 0x2
160#define QLA83XX_NIC_CORE_UNRECOVERABLE 0x3
161
162/* 83XX: Macros for defining IDC-Control bits */
163#define QLA83XX_IDC_RESET_DISABLED BIT_0
164#define QLA83XX_IDC_GRACEFUL_RESET BIT_1
165
166/* 83XX: Macros for different timeouts */
167#define QLA83XX_IDC_INITIALIZATION_TIMEOUT 30
168#define QLA83XX_IDC_RESET_ACK_TIMEOUT 10
169#define QLA83XX_MAX_LOCK_RECOVERY_WAIT (2 * HZ)
170
171/* 83XX: Macros for defining class in DEV-Partition Info register */
172#define QLA83XX_CLASS_TYPE_NONE 0x0
173#define QLA83XX_CLASS_TYPE_NIC 0x1
174#define QLA83XX_CLASS_TYPE_FCOE 0x2
175#define QLA83XX_CLASS_TYPE_ISCSI 0x3
176
177/* 83XX: Macros for IDC Lock-Recovery stages */
178#define IDC_LOCK_RECOVERY_STAGE1 0x1 /* Stage1: Intent for
179 * lock-recovery
180 */
181#define IDC_LOCK_RECOVERY_STAGE2 0x2 /* Stage2: Perform lock-recovery */
182
183/* 83XX: Macros for IDC Audit type */
184#define IDC_AUDIT_TIMESTAMP 0x0 /* IDC-AUDIT: Record timestamp of
185 * dev-state change to NEED-RESET
186 * or NEED-QUIESCENT
187 */
188#define IDC_AUDIT_COMPLETION 0x1 /* IDC-AUDIT: Record duration of
 189 * reset-recovery completion in
 190 * seconds
191 */
192
193/*
118 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an 194 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
119 * 133Mhz slot. 195 * 133Mhz slot.
120 */ 196 */
@@ -129,6 +205,7 @@
129#define MAX_FIBRE_DEVICES_2400 2048 205#define MAX_FIBRE_DEVICES_2400 2048
130#define MAX_FIBRE_DEVICES_LOOP 128 206#define MAX_FIBRE_DEVICES_LOOP 128
131#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400 207#define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
208#define LOOPID_MAP_SIZE (ha->max_fibre_devices)
132#define MAX_FIBRE_LUNS 0xFFFF 209#define MAX_FIBRE_LUNS 0xFFFF
133#define MAX_HOST_COUNT 16 210#define MAX_HOST_COUNT 16
134 211
@@ -259,6 +336,7 @@ struct srb_iocb {
259#define SRB_ADISC_CMD 6 336#define SRB_ADISC_CMD 6
260#define SRB_TM_CMD 7 337#define SRB_TM_CMD 7
261#define SRB_SCSI_CMD 8 338#define SRB_SCSI_CMD 8
339#define SRB_BIDI_CMD 9
262 340
263typedef struct srb { 341typedef struct srb {
264 atomic_t ref_count; 342 atomic_t ref_count;
@@ -594,6 +672,20 @@ typedef struct {
594#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ 672#define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */
595#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ 673#define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */
596 674
675/* 83XX FCoE specific */
676#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
677
678/* Interrupt type codes */
679#define INTR_ROM_MB_SUCCESS 0x1
680#define INTR_ROM_MB_FAILED 0x2
681#define INTR_MB_SUCCESS 0x10
682#define INTR_MB_FAILED 0x11
683#define INTR_ASYNC_EVENT 0x12
684#define INTR_RSP_QUE_UPDATE 0x13
685#define INTR_RSP_QUE_UPDATE_83XX 0x14
686#define INTR_ATIO_QUE_UPDATE 0x1C
687#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
688
597/* ISP mailbox loopback echo diagnostic error code */ 689/* ISP mailbox loopback echo diagnostic error code */
598#define MBS_LB_RESET 0x17 690#define MBS_LB_RESET 0x17
599/* 691/*
@@ -718,6 +810,7 @@ typedef struct {
718#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */ 810#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
719#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */ 811#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
720#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */ 812#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
813#define MBC_PORT_RESET 0x120 /* Port Reset */
721#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */ 814#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
722#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */ 815#define MBC_GET_PORT_CONFIG 0x123 /* Get port configuration */
723 816
@@ -1375,9 +1468,10 @@ typedef struct {
1375} cont_a64_entry_t; 1468} cont_a64_entry_t;
1376 1469
1377#define PO_MODE_DIF_INSERT 0 1470#define PO_MODE_DIF_INSERT 0
1378#define PO_MODE_DIF_REMOVE BIT_0 1471#define PO_MODE_DIF_REMOVE 1
1379#define PO_MODE_DIF_PASS BIT_1 1472#define PO_MODE_DIF_PASS 2
1380#define PO_MODE_DIF_REPLACE (BIT_0 + BIT_1) 1473#define PO_MODE_DIF_REPLACE 3
1474#define PO_MODE_DIF_TCP_CKSUM 6
1381#define PO_ENABLE_DIF_BUNDLING BIT_8 1475#define PO_ENABLE_DIF_BUNDLING BIT_8
1382#define PO_ENABLE_INCR_GUARD_SEED BIT_3 1476#define PO_ENABLE_INCR_GUARD_SEED BIT_3
1383#define PO_DISABLE_INCR_REF_TAG BIT_5 1477#define PO_DISABLE_INCR_REF_TAG BIT_5
@@ -1509,6 +1603,13 @@ typedef struct {
1509#define CS_RETRY 0x82 /* Driver defined */ 1603#define CS_RETRY 0x82 /* Driver defined */
1510#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */ 1604#define CS_LOOP_DOWN_ABORT 0x83 /* Driver defined */
1511 1605
1606#define CS_BIDIR_RD_OVERRUN 0x700
1607#define CS_BIDIR_RD_WR_OVERRUN 0x707
1608#define CS_BIDIR_RD_OVERRUN_WR_UNDERRUN 0x715
1609#define CS_BIDIR_RD_UNDERRUN 0x1500
1610#define CS_BIDIR_RD_UNDERRUN_WR_OVERRUN 0x1507
1611#define CS_BIDIR_RD_WR_UNDERRUN 0x1515
1612#define CS_BIDIR_DMA 0x200
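These values appear to pack one status byte per direction: read status in the high byte, write status in the low byte, with 0x07 meaning overrun and 0x15 underrun (CS_BIDIR_DMA is the odd one out). A helper sketch based on that reading, an assumption drawn from the table rather than from firmware documentation:

	static inline uint8_t cs_bidir_rd_status(uint16_t cs)
	{
		return cs >> 8;		/* 0x07 overrun, 0x15 underrun */
	}
	static inline uint8_t cs_bidir_wr_status(uint16_t cs)
	{
		return cs & 0xff;	/* same encoding, write side */
	}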
1512/* 1613/*
1513 * Status entry status flags 1614 * Status entry status flags
1514 */ 1615 */
@@ -2373,6 +2474,11 @@ struct qla_statistics {
2373 uint64_t output_bytes; 2474 uint64_t output_bytes;
2374}; 2475};
2375 2476
2477struct bidi_statistics {
2478 unsigned long long io_count;
2479 unsigned long long transfer_bytes;
2480};
2481
2376/* Multi queue support */ 2482/* Multi queue support */
2377#define MBC_INITIALIZE_MULTIQ 0x1f 2483#define MBC_INITIALIZE_MULTIQ 0x1f
2378#define QLA_QUE_PAGE 0X1000 2484#define QLA_QUE_PAGE 0X1000
@@ -2509,14 +2615,16 @@ struct qla_hw_data {
2509 uint32_t disable_msix_handshake :1; 2615 uint32_t disable_msix_handshake :1;
2510 uint32_t fcp_prio_enabled :1; 2616 uint32_t fcp_prio_enabled :1;
2511 uint32_t isp82xx_fw_hung:1; 2617 uint32_t isp82xx_fw_hung:1;
2618 uint32_t nic_core_hung:1;
2512 2619
2513 uint32_t quiesce_owner:1; 2620 uint32_t quiesce_owner:1;
2514 uint32_t thermal_supported:1; 2621 uint32_t thermal_supported:1;
2515 uint32_t isp82xx_reset_hdlr_active:1; 2622 uint32_t nic_core_reset_hdlr_active:1;
2516 uint32_t isp82xx_reset_owner:1; 2623 uint32_t nic_core_reset_owner:1;
2517 uint32_t isp82xx_no_md_cap:1; 2624 uint32_t isp82xx_no_md_cap:1;
2518 uint32_t host_shutting_down:1; 2625 uint32_t host_shutting_down:1;
2519 /* 30 bits */ 2626 uint32_t idc_compl_status:1;
2627 /* 32 bits */
2520 } flags; 2628 } flags;
2521 2629
2522 /* This spinlock is used to protect "io transactions", you must 2630 /* This spinlock is used to protect "io transactions", you must
@@ -2670,6 +2778,16 @@ struct qla_hw_data {
2670#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) 2778#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2671#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) 2779#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
2672#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha)) 2780#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
2781#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
2782/* Bit 21 of fw_attributes decides the MCTP capabilities */
2783#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
2784 ((ha)->fw_attributes_ext[0] & BIT_0))
2785#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha))
2786#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha))
2787#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
2788#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
2789#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
2790 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
2673 2791
2674 /* HBA serial number */ 2792 /* HBA serial number */
2675 uint8_t serial0; 2793 uint8_t serial0;
@@ -2753,6 +2871,7 @@ struct qla_hw_data {
2753 struct completion mbx_intr_comp; /* Used for completion notification */ 2871 struct completion mbx_intr_comp; /* Used for completion notification */
2754 struct completion dcbx_comp; /* For set port config notification */ 2872 struct completion dcbx_comp; /* For set port config notification */
2755 int notify_dcbx_comp; 2873 int notify_dcbx_comp;
2874 struct mutex selflogin_lock;
2756 2875
2757 /* Basic firmware related information. */ 2876 /* Basic firmware related information. */
2758 uint16_t fw_major_version; 2877 uint16_t fw_major_version;
@@ -2784,7 +2903,12 @@ struct qla_hw_data {
2784 int fw_dump_reading; 2903 int fw_dump_reading;
2785 dma_addr_t eft_dma; 2904 dma_addr_t eft_dma;
2786 void *eft; 2905 void *eft;
2787 2906/* Current size of mctp dump is 0x086064 bytes */
2907#define MCTP_DUMP_SIZE 0x086064
2908 dma_addr_t mctp_dump_dma;
2909 void *mctp_dump;
2910 int mctp_dumped;
2911 int mctp_dump_reading;
2788 uint32_t chain_offset; 2912 uint32_t chain_offset;
2789 struct dentry *dfs_dir; 2913 struct dentry *dfs_dir;
2790 struct dentry *dfs_fce; 2914 struct dentry *dfs_fce;
@@ -2896,8 +3020,8 @@ struct qla_hw_data {
2896 unsigned long mn_win_crb; 3020 unsigned long mn_win_crb;
2897 unsigned long ms_win_crb; 3021 unsigned long ms_win_crb;
2898 int qdr_sn_window; 3022 int qdr_sn_window;
2899 uint32_t nx_dev_init_timeout; 3023 uint32_t fcoe_dev_init_timeout;
2900 uint32_t nx_reset_timeout; 3024 uint32_t fcoe_reset_timeout;
2901 rwlock_t hw_lock; 3025 rwlock_t hw_lock;
2902 uint16_t portnum; /* port number */ 3026 uint16_t portnum; /* port number */
2903 int link_width; 3027 int link_width;
@@ -2918,6 +3042,20 @@ struct qla_hw_data {
2918 void *md_dump; 3042 void *md_dump;
2919 uint32_t md_dump_size; 3043 uint32_t md_dump_size;
2920 3044
3045 void *loop_id_map;
3046
3047 /* QLA83XX IDC specific fields */
3048 uint32_t idc_audit_ts;
3049
3050 /* DPC low-priority workqueue */
3051 struct workqueue_struct *dpc_lp_wq;
3052 struct work_struct idc_aen;
3053 /* DPC high-priority workqueue */
3054 struct workqueue_struct *dpc_hp_wq;
3055 struct work_struct nic_core_reset;
3056 struct work_struct idc_state_handler;
3057 struct work_struct nic_core_unrecoverable;
3058
2921 struct qlt_hw_data tgt; 3059 struct qlt_hw_data tgt;
2922}; 3060};
2923 3061
@@ -2985,6 +3123,13 @@ typedef struct scsi_qla_host {
2985 3123
2986 /* ISP configuration data. */ 3124 /* ISP configuration data. */
2987 uint16_t loop_id; /* Host adapter loop id */ 3125 uint16_t loop_id; /* Host adapter loop id */
3126 uint16_t self_login_loop_id; /* host adapter loop id
 3127 					 * obtained at self login
 3128 					 */
 3129 	fc_port_t bidir_fcport;		/* fcport used for bidir cmds;
 3130 					 * allocated once instead of
 3131 					 * once per command
3132 */
2988 3133
2989 port_id_t d_id; /* Host adapter port id */ 3134 port_id_t d_id; /* Host adapter port id */
2990 uint8_t marker_needed; 3135 uint8_t marker_needed;
@@ -3038,6 +3183,7 @@ typedef struct scsi_qla_host {
3038 int seconds_since_last_heartbeat; 3183 int seconds_since_last_heartbeat;
3039 struct fc_host_statistics fc_host_stat; 3184 struct fc_host_statistics fc_host_stat;
3040 struct qla_statistics qla_stats; 3185 struct qla_statistics qla_stats;
3186 struct bidi_statistics bidi_stats;
3041 3187
3042 atomic_t vref_count; 3188 atomic_t vref_count;
3043} scsi_qla_host_t; 3189} scsi_qla_host_t;
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 499c74e39ee..706c4f7bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 6d7d7758c79..59524aa0ab3 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -381,6 +381,44 @@ struct init_cb_24xx {
381/* 381/*
382 * ISP queue - command entry structure definition. 382 * ISP queue - command entry structure definition.
383 */ 383 */
384#define COMMAND_BIDIRECTIONAL 0x75
385struct cmd_bidir {
386 uint8_t entry_type; /* Entry type. */
387 uint8_t entry_count; /* Entry count. */
388 uint8_t sys_define; /* System defined */
389 uint8_t entry_status; /* Entry status. */
390
391 uint32_t handle; /* System handle. */
392
 393 	uint16_t nport_handle;		/* N_PORT handle. */
394
 395 	uint16_t timeout;		/* Command timeout. */
396
397 uint16_t wr_dseg_count; /* Write Data segment count. */
398 uint16_t rd_dseg_count; /* Read Data segment count. */
399
400 struct scsi_lun lun; /* FCP LUN (BE). */
401
402 uint16_t control_flags; /* Control flags. */
403#define BD_WRAP_BACK BIT_3
404#define BD_READ_DATA BIT_1
405#define BD_WRITE_DATA BIT_0
406
407 uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
408 uint32_t fcp_cmnd_dseg_address[2]; /* Data segment address. */
409
410 uint16_t reserved[2]; /* Reserved */
411
 412 	uint32_t rd_byte_count;		/* Total byte count read. */
 413 	uint32_t wr_byte_count;		/* Total byte count written. */
414
415 uint8_t port_id[3]; /* PortID of destination port.*/
416 uint8_t vp_index;
417
418 uint32_t fcp_data_dseg_address[2]; /* Data segment address. */
419 uint16_t fcp_data_dseg_len; /* Data segment length. */
420};
421
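To illustrate the layout, a wrap-back diagnostic IOCB would carry both data directions, so all three control flags are set. This is an assumption drawn from the flag names; the real builder, qla2x00_start_bidir(), lives elsewhere in this series, and req_data_len/rsp_data_len are hypothetical lengths:

	struct cmd_bidir pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.entry_type = COMMAND_BIDIRECTIONAL;
	pkt.entry_count = 1;
	pkt.control_flags = cpu_to_le16(BD_WRAP_BACK | BD_READ_DATA |
	    BD_WRITE_DATA);
	pkt.wr_byte_count = cpu_to_le32(req_data_len);
	pkt.rd_byte_count = cpu_to_le32(rsp_data_len);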
384#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */ 422#define COMMAND_TYPE_6 0x48 /* Command Type 6 entry */
385struct cmd_type_6 { 423struct cmd_type_6 {
386 uint8_t entry_type; /* Entry type. */ 424 uint8_t entry_type; /* Entry type. */
@@ -1130,7 +1168,7 @@ struct mid_db_entry_24xx {
1130/* 1168/*
1131 * Virtual Port Control IOCB 1169 * Virtual Port Control IOCB
1132 */ 1170 */
1133#define VP_CTRL_IOCB_TYPE 0x30 /* Vitual Port Control entry. */ 1171#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */
1134struct vp_ctrl_entry_24xx { 1172struct vp_ctrl_entry_24xx {
1135 uint8_t entry_type; /* Entry type. */ 1173 uint8_t entry_type; /* Entry type. */
1136 uint8_t entry_count; /* Entry count. */ 1174 uint8_t entry_count; /* Entry count. */
@@ -1166,7 +1204,7 @@ struct vp_ctrl_entry_24xx {
1166/* 1204/*
1167 * Modify Virtual Port Configuration IOCB 1205 * Modify Virtual Port Configuration IOCB
1168 */ 1206 */
1169#define VP_CONFIG_IOCB_TYPE 0x31 /* Vitual Port Config entry. */ 1207#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */
1170struct vp_config_entry_24xx { 1208struct vp_config_entry_24xx {
1171 uint8_t entry_type; /* Entry type. */ 1209 uint8_t entry_type; /* Entry type. */
1172 uint8_t entry_count; /* Entry count. */ 1210 uint8_t entry_count; /* Entry count. */
@@ -1502,7 +1540,10 @@ struct access_chip_rsp_84xx {
1502/* 1540/*
1503 * ISP83xx mailbox commands 1541 * ISP83xx mailbox commands
1504 */ 1542 */
1505#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */ 1543#define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
1544#define MBC_READ_REMOTE_REG 0x0009 /* Read remote register */
1545#define MBC_RESTART_NIC_FIRMWARE 0x003d /* Restart NIC firmware */
1546#define MBC_SET_ACCESS_CONTROL 0x003e /* Access control command */
1506 1547
1507/* Flash access control option field bit definitions */ 1548/* Flash access control option field bit definitions */
1508#define FAC_OPT_FORCE_SEMAPHORE BIT_15 1549#define FAC_OPT_FORCE_SEMAPHORE BIT_15
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9eacd2df111..6acb39785a4 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -48,7 +48,7 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *);
48 48
49extern int qla2x00_abort_isp(scsi_qla_host_t *); 49extern int qla2x00_abort_isp(scsi_qla_host_t *);
50extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); 50extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
51extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *); 51extern void qla2x00_quiesce_io(scsi_qla_host_t *);
52 52
53extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); 53extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
54 54
@@ -76,6 +76,14 @@ extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
76 76
77extern fc_port_t * 77extern fc_port_t *
78qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); 78qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t );
79
80extern int __qla83xx_set_idc_control(scsi_qla_host_t *, uint32_t);
81extern int __qla83xx_get_idc_control(scsi_qla_host_t *, uint32_t *);
82extern void qla83xx_idc_audit(scsi_qla_host_t *, int);
83extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
84extern void qla83xx_reset_ownership(scsi_qla_host_t *);
85extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
86
79/* 87/*
80 * Global Data in qla_os.c source file. 88 * Global Data in qla_os.c source file.
81 */ 89 */
@@ -133,6 +141,20 @@ extern void qla2x00_relogin(struct scsi_qla_host *);
133extern void qla2x00_do_work(struct scsi_qla_host *); 141extern void qla2x00_do_work(struct scsi_qla_host *);
134extern void qla2x00_free_fcports(struct scsi_qla_host *); 142extern void qla2x00_free_fcports(struct scsi_qla_host *);
135 143
144extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
145extern void qla83xx_service_idc_aen(struct work_struct *);
146extern void qla83xx_nic_core_unrecoverable_work(struct work_struct *);
147extern void qla83xx_idc_state_handler_work(struct work_struct *);
148extern void qla83xx_nic_core_reset_work(struct work_struct *);
149
150extern void qla83xx_idc_lock(scsi_qla_host_t *, uint16_t);
151extern void qla83xx_idc_unlock(scsi_qla_host_t *, uint16_t);
152extern int qla83xx_idc_state_handler(scsi_qla_host_t *);
153extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha);
154extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
155extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
156extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
157
136/* 158/*
137 * Global Functions in qla_mid.c source file. 159 * Global Functions in qla_mid.c source file.
138 */ 160 */
@@ -188,6 +210,8 @@ extern int qla2x00_start_sp(srb_t *);
188extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); 210extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 211extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
190extern int qla24xx_dif_start_scsi(srb_t *); 212extern int qla24xx_dif_start_scsi(srb_t *);
213extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
214extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
191 215
192extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); 216extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
193extern int qla2x00_issue_marker(scsi_qla_host_t *, int); 217extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
@@ -376,6 +400,9 @@ qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
376extern int 400extern int
377qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *); 401qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
378 402
403extern int
404qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
405
379/* 406/*
380 * Global Function Prototypes in qla_isr.c source file. 407 * Global Function Prototypes in qla_isr.c source file.
381 */ 408 */
@@ -419,7 +446,11 @@ extern void qla24xx_beacon_blink(struct scsi_qla_host *);
419extern void qla83xx_beacon_blink(struct scsi_qla_host *); 446extern void qla83xx_beacon_blink(struct scsi_qla_host *);
420extern int qla82xx_beacon_on(struct scsi_qla_host *); 447extern int qla82xx_beacon_on(struct scsi_qla_host *);
421extern int qla82xx_beacon_off(struct scsi_qla_host *); 448extern int qla82xx_beacon_off(struct scsi_qla_host *);
422extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t); 449extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
450extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
451extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
452extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
453 uint32_t, uint16_t *);
423 454
424extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 455extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
425 uint32_t, uint32_t); 456 uint32_t, uint32_t);
@@ -527,7 +558,6 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
527/* PCI related functions */ 558/* PCI related functions */
528extern int qla82xx_pci_config(struct scsi_qla_host *); 559extern int qla82xx_pci_config(struct scsi_qla_host *);
529extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int); 560extern int qla82xx_pci_mem_read_2M(struct qla_hw_data *, u64, void *, int);
530extern char *qla82xx_pci_info_str(struct scsi_qla_host *, char *);
531extern int qla82xx_pci_region_offset(struct pci_dev *, int); 561extern int qla82xx_pci_region_offset(struct pci_dev *, int);
532extern int qla82xx_iospace_config(struct qla_hw_data *); 562extern int qla82xx_iospace_config(struct qla_hw_data *);
533 563
@@ -580,6 +610,7 @@ extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t);
580extern int qla82xx_idc_lock(struct qla_hw_data *); 610extern int qla82xx_idc_lock(struct qla_hw_data *);
581extern void qla82xx_idc_unlock(struct qla_hw_data *); 611extern void qla82xx_idc_unlock(struct qla_hw_data *);
582extern int qla82xx_device_state_handler(scsi_qla_host_t *); 612extern int qla82xx_device_state_handler(scsi_qla_host_t *);
613extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
583extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); 614extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
584 615
585extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, 616extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 05260d25fe4..f4e4bd7c3f4 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1131,7 +1131,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1131 return ret; 1131 return ret;
1132 1132
1133 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 1133 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1134 0xfa, mb, BIT_1|BIT_0); 1134 0xfa, mb, BIT_1);
1135 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) { 1135 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1136 if (rval == QLA_MEMORY_ALLOC_FAILED) 1136 if (rval == QLA_MEMORY_ALLOC_FAILED)
1137 ql_dbg(ql_dbg_disc, vha, 0x2085, 1137 ql_dbg(ql_dbg_disc, vha, 0x2085,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a44653b4216..799a58bb985 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -77,7 +77,7 @@ qla2x00_sp_free(void *data, void *ptr)
77 77
78/* Asynchronous Login/Logout Routines -------------------------------------- */ 78/* Asynchronous Login/Logout Routines -------------------------------------- */
79 79
80static inline unsigned long 80unsigned long
81qla2x00_get_async_timeout(struct scsi_qla_host *vha) 81qla2x00_get_async_timeout(struct scsi_qla_host *vha)
82{ 82{
83 unsigned long tmo; 83 unsigned long tmo;
@@ -429,6 +429,79 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
429/* QLogic ISP2x00 Hardware Support Functions. */ 429/* QLogic ISP2x00 Hardware Support Functions. */
430/****************************************************************************/ 430/****************************************************************************/
431 431
432int
433qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
434{
435 int rval = QLA_SUCCESS;
436 struct qla_hw_data *ha = vha->hw;
437 uint32_t idc_major_ver, idc_minor_ver;
438 uint16_t config[4];
439
440 qla83xx_idc_lock(vha, 0);
441
442 /* SV: TODO: Assign initialization timeout from
443 * flash-info / other param
444 */
445 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
446 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
447
448 /* Set our fcoe function presence */
449 if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
450 ql_dbg(ql_dbg_p3p, vha, 0xb077,
451 "Error while setting DRV-Presence.\n");
452 rval = QLA_FUNCTION_FAILED;
453 goto exit;
454 }
455
456 /* Decide the reset ownership */
457 qla83xx_reset_ownership(vha);
458
459 /*
460 * On first protocol driver load:
461 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
462 * register.
463 * Others: Check compatibility with current IDC Major version.
464 */
465 qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
466 if (ha->flags.nic_core_reset_owner) {
467 /* Set IDC Major version */
468 idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
469 qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
470
471 /* Clearing IDC-Lock-Recovery register */
472 qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
473 } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
474 /*
475 * Clear further IDC participation if we are not compatible with
476 * the current IDC Major Version.
477 */
478 ql_log(ql_log_warn, vha, 0xb07d,
479 "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
480 idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
481 __qla83xx_clear_drv_presence(vha);
482 rval = QLA_FUNCTION_FAILED;
483 goto exit;
484 }
485 /* Each function sets its supported Minor version. */
486 qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
487 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
488 qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
489
490 if (ha->flags.nic_core_reset_owner) {
491 memset(config, 0, sizeof(config));
492 if (!qla81xx_get_port_config(vha, config))
493 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
494 QLA8XXX_DEV_READY);
495 }
496
497 rval = qla83xx_idc_state_handler(vha);
498
499exit:
500 qla83xx_idc_unlock(vha, 0);
501
502 return rval;
503}
504
432/* 505/*
433* qla2x00_initialize_adapter 506* qla2x00_initialize_adapter
434* Initialize board. 507* Initialize board.
@@ -537,6 +610,14 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
537 } 610 }
538 } 611 }
539 612
613 /* Load the NIC Core f/w if we are the first protocol driver. */
614 if (IS_QLA8031(ha)) {
615 rval = qla83xx_nic_core_fw_load(vha);
616 if (rval)
617 ql_log(ql_log_warn, vha, 0x0124,
618 "Error in initializing NIC Core f/w.\n");
619 }
620
540 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) 621 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
541 qla24xx_read_fcp_prio_cfg(vha); 622 qla24xx_read_fcp_prio_cfg(vha);
542 623
@@ -686,7 +767,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
686 767
687 /* PCIe -- adjust Maximum Read Request Size (2048). */ 768 /* PCIe -- adjust Maximum Read Request Size (2048). */
688 if (pci_is_pcie(ha->pdev)) 769 if (pci_is_pcie(ha->pdev))
689 pcie_set_readrq(ha->pdev, 2048); 770 pcie_set_readrq(ha->pdev, 4096);
690 771
691 pci_disable_rom(ha->pdev); 772 pci_disable_rom(ha->pdev);
692 773
@@ -722,7 +803,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
722 803
723 /* PCIe -- adjust Maximum Read Request Size (2048). */ 804 /* PCIe -- adjust Maximum Read Request Size (2048). */
724 if (pci_is_pcie(ha->pdev)) 805 if (pci_is_pcie(ha->pdev))
725 pcie_set_readrq(ha->pdev, 2048); 806 pcie_set_readrq(ha->pdev, 4096);
726 807
727 pci_disable_rom(ha->pdev); 808 pci_disable_rom(ha->pdev);
728 809
@@ -1480,7 +1561,8 @@ enable_82xx_npiv:
1480 "ISP Firmware failed checksum.\n"); 1561 "ISP Firmware failed checksum.\n");
1481 goto failed; 1562 goto failed;
1482 } 1563 }
1483 } 1564 } else
1565 goto failed;
1484 1566
1485 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { 1567 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1486 /* Enable proper parity. */ 1568 /* Enable proper parity. */
@@ -1825,7 +1907,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1825 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); 1907 ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
1826 1908
1827 if (ha->flags.npiv_supported) { 1909 if (ha->flags.npiv_supported) {
1828 if (ha->operating_mode == LOOP) 1910 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
1829 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; 1911 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
1830 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1912 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1831 } 1913 }
@@ -2682,11 +2764,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2682 new_fcport = NULL; 2764 new_fcport = NULL;
2683 entries = MAX_FIBRE_DEVICES_LOOP; 2765 entries = MAX_FIBRE_DEVICES_LOOP;
2684 2766
2685 ql_dbg(ql_dbg_disc, vha, 0x2016,
2686 "Getting FCAL position map.\n");
2687 if (ql2xextended_error_logging & ql_dbg_disc)
2688 qla2x00_get_fcal_position_map(vha, NULL);
2689
2690 /* Get list of logged in devices. */ 2767 /* Get list of logged in devices. */
2691 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha)); 2768 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
2692 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 2769 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
@@ -2753,6 +2830,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2753 if (loop_id > LAST_LOCAL_LOOP_ID) 2830 if (loop_id > LAST_LOCAL_LOOP_ID)
2754 continue; 2831 continue;
2755 2832
2833 memset(new_fcport, 0, sizeof(fc_port_t));
2834
2756 /* Fill in member data. */ 2835 /* Fill in member data. */
2757 new_fcport->d_id.b.domain = domain; 2836 new_fcport->d_id.b.domain = domain;
2758 new_fcport->d_id.b.area = area; 2837 new_fcport->d_id.b.area = area;
@@ -3285,7 +3364,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3285 */ 3364 */
3286 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { 3365 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3287 fcport->d_id.b24 = new_fcport->d_id.b24; 3366 fcport->d_id.b24 = new_fcport->d_id.b24;
3288 fcport->loop_id = FC_NO_LOOP_ID; 3367 qla2x00_clear_loop_id(fcport);
3289 fcport->flags |= (FCF_FABRIC_DEVICE | 3368 fcport->flags |= (FCF_FABRIC_DEVICE |
3290 FCF_LOGIN_NEEDED); 3369 FCF_LOGIN_NEEDED);
3291 break; 3370 break;
@@ -3306,7 +3385,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3306 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3385 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3307 fcport->d_id.b.domain, fcport->d_id.b.area, 3386 fcport->d_id.b.domain, fcport->d_id.b.area,
3308 fcport->d_id.b.al_pa); 3387 fcport->d_id.b.al_pa);
3309 fcport->loop_id = FC_NO_LOOP_ID; 3388 qla2x00_clear_loop_id(fcport);
3310 } 3389 }
3311 3390
3312 break; 3391 break;
@@ -3352,71 +3431,32 @@ int
3352qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) 3431qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3353{ 3432{
3354 int rval; 3433 int rval;
3355 int found;
3356 fc_port_t *fcport;
3357 uint16_t first_loop_id;
3358 struct qla_hw_data *ha = vha->hw; 3434 struct qla_hw_data *ha = vha->hw;
3359 struct scsi_qla_host *vp;
3360 struct scsi_qla_host *tvp;
3361 unsigned long flags = 0; 3435 unsigned long flags = 0;
3362 3436
3363 rval = QLA_SUCCESS; 3437 rval = QLA_SUCCESS;
3364 3438
3365 /* Save starting loop ID. */ 3439 spin_lock_irqsave(&ha->vport_slock, flags);
3366 first_loop_id = dev->loop_id;
3367
3368 for (;;) {
3369 /* Skip loop ID if already used by adapter. */
3370 if (dev->loop_id == vha->loop_id)
3371 dev->loop_id++;
3372
3373 /* Skip reserved loop IDs. */
3374 while (qla2x00_is_reserved_id(vha, dev->loop_id))
3375 dev->loop_id++;
3376
3377 /* Reset loop ID if passed the end. */
3378 if (dev->loop_id > ha->max_loop_id) {
3379 /* first loop ID. */
3380 dev->loop_id = ha->min_external_loopid;
3381 }
3382
3383 /* Check for loop ID being already in use. */
3384 found = 0;
3385 fcport = NULL;
3386
3387 spin_lock_irqsave(&ha->vport_slock, flags);
3388 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3389 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3390 if (fcport->loop_id == dev->loop_id &&
3391 fcport != dev) {
3392 /* ID possibly in use */
3393 found++;
3394 break;
3395 }
3396 }
3397 if (found)
3398 break;
3399 }
3400 spin_unlock_irqrestore(&ha->vport_slock, flags);
3401 3440
3402 /* If not in use then it is free to use. */ 3441 dev->loop_id = find_first_zero_bit(ha->loop_id_map,
3403 if (!found) { 3442 LOOPID_MAP_SIZE);
3404 ql_dbg(ql_dbg_disc, dev->vha, 0x2086, 3443 if (dev->loop_id >= LOOPID_MAP_SIZE ||
3405 "Assigning new loopid=%x, portid=%x.\n", 3444 qla2x00_is_reserved_id(vha, dev->loop_id)) {
3406 dev->loop_id, dev->d_id.b24); 3445 dev->loop_id = FC_NO_LOOP_ID;
3407 break; 3446 rval = QLA_FUNCTION_FAILED;
3408 } 3447 } else
3448 set_bit(dev->loop_id, ha->loop_id_map);
3409 3449
3410 /* ID in use. Try next value. */ 3450 spin_unlock_irqrestore(&ha->vport_slock, flags);
3411 dev->loop_id++;
3412 3451
3413 /* If wrap around. No free ID to use. */ 3452 if (rval == QLA_SUCCESS)
3414 if (dev->loop_id == first_loop_id) { 3453 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
3415 dev->loop_id = FC_NO_LOOP_ID; 3454 "Assigning new loopid=%x, portid=%x.\n",
3416 rval = QLA_FUNCTION_FAILED; 3455 dev->loop_id, dev->d_id.b24);
3417 break; 3456 else
3418 } 3457 ql_log(ql_log_warn, dev->vha, 0x2087,
3419 } 3458 "No loop_id's available, portid=%x.\n",
3459 dev->d_id.b24);
3420 3460
3421 return (rval); 3461 return (rval);
3422} 3462}
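qla2x00_clear_loop_id(), used by several hunks above, is defined elsewhere in this series; a plausible counterpart to the bitmap allocator would look like the sketch below (an assumption, mirroring the set_bit() in the new allocator):

	static inline void
	qla2x00_clear_loop_id(fc_port_t *fcport)
	{
		struct qla_hw_data *ha = fcport->vha->hw;

		if (fcport->loop_id == FC_NO_LOOP_ID ||
		    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
			return;

		/* Release the id in the map and mark the port unassigned. */
		clear_bit(fcport->loop_id, ha->loop_id_map);
		fcport->loop_id = FC_NO_LOOP_ID;
	}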
@@ -3616,7 +3656,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3616 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3656 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3617 fcport->d_id.b.domain, fcport->d_id.b.area, 3657 fcport->d_id.b.domain, fcport->d_id.b.area,
3618 fcport->d_id.b.al_pa); 3658 fcport->d_id.b.al_pa);
3619 fcport->loop_id = FC_NO_LOOP_ID; 3659 qla2x00_clear_loop_id(fcport);
3620 fcport->login_retry = 0; 3660 fcport->login_retry = 0;
3621 3661
3622 rval = 3; 3662 rval = 3;
@@ -3775,8 +3815,363 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3775 spin_unlock_irqrestore(&ha->vport_slock, flags); 3815 spin_unlock_irqrestore(&ha->vport_slock, flags);
3776} 3816}
3777 3817
3818/* Assumes idc_lock always held on entry */
3819void
3820qla83xx_reset_ownership(scsi_qla_host_t *vha)
3821{
3822 struct qla_hw_data *ha = vha->hw;
3823 uint32_t drv_presence, drv_presence_mask;
3824 uint32_t dev_part_info1, dev_part_info2, class_type;
3825 uint32_t class_type_mask = 0x3;
3826 uint16_t fcoe_other_function = 0xffff, i;
3827
3828 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
3829
3830 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
3831 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
3832 for (i = 0; i < 8; i++) {
3833 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
3834 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
3835 (i != ha->portnum)) {
3836 fcoe_other_function = i;
3837 break;
3838 }
3839 }
3840 if (fcoe_other_function == 0xffff) {
3841 for (i = 0; i < 8; i++) {
3842 class_type = ((dev_part_info2 >> (i * 4)) &
3843 class_type_mask);
3844 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
3845 ((i + 8) != ha->portnum)) {
3846 fcoe_other_function = i + 8;
3847 break;
3848 }
3849 }
3850 }
3851 /*
3852 * Prepare drv-presence mask based on fcoe functions present.
3853 * However consider only valid physical fcoe function numbers (0-15).
3854 */
3855 drv_presence_mask = ~((1 << (ha->portnum)) |
3856 ((fcoe_other_function == 0xffff) ?
3857 0 : (1 << (fcoe_other_function))));
3858
3859 /* We are the reset owner iff:
3860 * - No other protocol drivers present.
3861 * - This is the lowest among fcoe functions. */
3862 if (!(drv_presence & drv_presence_mask) &&
3863 (ha->portnum < fcoe_other_function)) {
3864 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
3865 "This host is Reset owner.\n");
3866 ha->flags.nic_core_reset_owner = 1;
3867 }
3868}
3869
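The reset-owner test above works by masking DRV-Presence: it clears this function's own bit and, when one was found, the other FCoE function's bit. Any bit still set after the mask belongs to a non-FCoE protocol driver, which disqualifies this function; otherwise the lower-numbered FCoE function wins. A minimal userspace sketch of that mask arithmetic (the port numbers and presence value below are illustrative, not read from hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t portnum = 0;         /* our FCoE function (illustrative) */
	uint32_t fcoe_other = 1;      /* other FCoE function found in DEV_PARTINFO */
	uint32_t drv_presence = 0x3;  /* functions 0 and 1 present */

	/* Clear both FCoE functions' bits, as in qla83xx_reset_ownership(). */
	uint32_t mask = ~((1u << portnum) | (1u << fcoe_other));

	/* Owner iff no non-FCoE driver remains and we are the lower function. */
	if (!(drv_presence & mask) && portnum < fcoe_other)
		printf("function %u is reset owner\n", (unsigned)portnum);
	return 0;
}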
3870int
3871__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
3872{
3873 int rval = QLA_SUCCESS;
3874 struct qla_hw_data *ha = vha->hw;
3875 uint32_t drv_ack;
3876
3877 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
3878 if (rval == QLA_SUCCESS) {
3879 drv_ack |= (1 << ha->portnum);
3880 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
3881 }
3882
3883 return rval;
3884}
3885
3886int
3887qla83xx_set_drv_ack(scsi_qla_host_t *vha)
3888{
3889 int rval = QLA_SUCCESS;
3890
3891 qla83xx_idc_lock(vha, 0);
3892 rval = __qla83xx_set_drv_ack(vha);
3893 qla83xx_idc_unlock(vha, 0);
3894
3895 return rval;
3896}
3897
3898int
3899__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
3900{
3901 int rval = QLA_SUCCESS;
3902 struct qla_hw_data *ha = vha->hw;
3903 uint32_t drv_ack;
3904
3905 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
3906 if (rval == QLA_SUCCESS) {
3907 drv_ack &= ~(1 << ha->portnum);
3908 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
3909 }
3910
3911 return rval;
3912}
3913
3914int
3915qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
3916{
3917 int rval = QLA_SUCCESS;
3918
3919 qla83xx_idc_lock(vha, 0);
3920 rval = __qla83xx_clear_drv_ack(vha);
3921 qla83xx_idc_unlock(vha, 0);
3922
3923 return rval;
3924}
3925
3926const char *
3927qla83xx_dev_state_to_string(uint32_t dev_state)
3928{
3929 switch (dev_state) {
3930 case QLA8XXX_DEV_COLD:
3931 return "COLD/RE-INIT";
3932 case QLA8XXX_DEV_INITIALIZING:
3933 return "INITIALIZING";
3934 case QLA8XXX_DEV_READY:
3935 return "READY";
3936 case QLA8XXX_DEV_NEED_RESET:
3937 return "NEED RESET";
3938 case QLA8XXX_DEV_NEED_QUIESCENT:
3939 return "NEED QUIESCENT";
3940 case QLA8XXX_DEV_FAILED:
3941 return "FAILED";
3942 case QLA8XXX_DEV_QUIESCENT:
3943 return "QUIESCENT";
3944 default:
3945 return "Unknown";
3946 }
3947}
3948
3949/* Assumes idc-lock always held on entry */
3950void
3951qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
3952{
3953 struct qla_hw_data *ha = vha->hw;
3954 uint32_t idc_audit_reg = 0, duration_secs = 0;
3955
3956 switch (audit_type) {
3957 case IDC_AUDIT_TIMESTAMP:
3958 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
3959 idc_audit_reg = (ha->portnum) |
3960 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
3961 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
3962 break;
3963
3964 case IDC_AUDIT_COMPLETION:
3965 duration_secs = ((jiffies_to_msecs(jiffies) -
3966 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
3967 idc_audit_reg = (ha->portnum) |
3968 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
3969 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
3970 break;
3971
3972 default:
3973 ql_log(ql_log_warn, vha, 0xb078,
3974 "Invalid audit type specified.\n");
3975 break;
3976 }
3977}
3978
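The audit register written above packs the function number into the low seven bits, the audit type at bit 7, and a timestamp or duration (in seconds) from bit 8 upward. A standalone decode sketch, assuming the IDC_AUDIT_TIMESTAMP and IDC_AUDIT_COMPLETION constants are 0 and 1:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example value: port 3, completion audit, 42 second duration. */
	uint32_t reg = 3u | (1u << 7) | (42u << 8);

	printf("port=%u type=%s seconds=%u\n",
	    (unsigned)(reg & 0x7f),
	    ((reg >> 7) & 1) ? "completion" : "timestamp",
	    (unsigned)(reg >> 8));
	return 0;
}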
3979/* Assumes idc_lock always held on entry */
3980int
3981qla83xx_initiating_reset(scsi_qla_host_t *vha)
3982{
3983 struct qla_hw_data *ha = vha->hw;
3984 uint32_t idc_control, dev_state;
3985
3986 __qla83xx_get_idc_control(vha, &idc_control);
3987 if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
3988 ql_log(ql_log_info, vha, 0xb080,
3989 "NIC Core reset has been disabled. idc-control=0x%x\n",
3990 idc_control);
3991 return QLA_FUNCTION_FAILED;
3992 }
3993
3994 /* Set NEED-RESET iff in READY state and we are the reset-owner */
3995 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3996 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
3997 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
3998 QLA8XXX_DEV_NEED_RESET);
3999 ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
4000 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
4001 } else {
4002 const char *state = qla83xx_dev_state_to_string(dev_state);
4003 ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
4004
4005 /* SV: XXX: Is timeout required here? */
4006 /* Wait for IDC state change READY -> NEED_RESET */
4007 while (dev_state == QLA8XXX_DEV_READY) {
4008 qla83xx_idc_unlock(vha, 0);
4009 msleep(200);
4010 qla83xx_idc_lock(vha, 0);
4011 qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4012 }
4013 }
4014
4015 /* Send IDC ack by writing to drv-ack register */
4016 __qla83xx_set_drv_ack(vha);
4017
4018 return QLA_SUCCESS;
4019}
4020
4021int
4022__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4023{
4024 return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4025}
4026
4027int
4028qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
4029{
4030 int rval = QLA_SUCCESS;
4031
4032 qla83xx_idc_lock(vha, 0);
4033 rval = __qla83xx_set_idc_control(vha, idc_control);
4034 qla83xx_idc_unlock(vha, 0);
4035
4036 return rval;
4037}
4038
4039int
4040__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4041{
4042 return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
4043}
4044
4045int
4046qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
4047{
4048 int rval = QLA_SUCCESS;
4049
4050 qla83xx_idc_lock(vha, 0);
4051 rval = __qla83xx_get_idc_control(vha, idc_control);
4052 qla83xx_idc_unlock(vha, 0);
4053
4054 return rval;
4055}
4056
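The get/set pairs above follow the usual locked/unlocked convention: the __-prefixed variants assume the caller already holds the IDC lock, while the plain wrappers take and drop it around the call. A standalone pthread analogue of the same pattern:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t idc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int idc_control;

/* Caller must hold idc_lock, like __qla83xx_set_idc_control(). */
static void __set_idc_control(unsigned int v)
{
	idc_control = v;
}

/* Takes the lock itself, like qla83xx_set_idc_control(). */
static void set_idc_control(unsigned int v)
{
	pthread_mutex_lock(&idc_lock);
	__set_idc_control(v);
	pthread_mutex_unlock(&idc_lock);
}

int main(void)
{
	set_idc_control(0x2);
	printf("idc_control=0x%x\n", idc_control);
	return 0;
}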
4057int
4058qla83xx_check_driver_presence(scsi_qla_host_t *vha)
4059{
4060 uint32_t drv_presence = 0;
4061 struct qla_hw_data *ha = vha->hw;
4062
4063 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4064 if (drv_presence & (1 << ha->portnum))
4065 return QLA_SUCCESS;
4066 else
4067 return QLA_TEST_FAILED;
4068}
4069
4070int
4071qla83xx_nic_core_reset(scsi_qla_host_t *vha)
4072{
4073 int rval = QLA_SUCCESS;
4074 struct qla_hw_data *ha = vha->hw;
4075
4076 ql_dbg(ql_dbg_p3p, vha, 0xb058,
4077 "Entered %s().\n", __func__);
4078
4079 if (vha->device_flags & DFLG_DEV_FAILED) {
4080 ql_log(ql_log_warn, vha, 0xb059,
4081 "Device in unrecoverable FAILED state.\n");
4082 return QLA_FUNCTION_FAILED;
4083 }
4084
4085 qla83xx_idc_lock(vha, 0);
4086
4087 if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
4088 ql_log(ql_log_warn, vha, 0xb05a,
4089 "Function=0x%x has been removed from IDC participation.\n",
4090 ha->portnum);
4091 rval = QLA_FUNCTION_FAILED;
4092 goto exit;
4093 }
4094
4095 qla83xx_reset_ownership(vha);
4096
4097 rval = qla83xx_initiating_reset(vha);
4098
4099 /*
4100 * Perform reset if we are the reset-owner,
4101 * else wait till IDC state changes to READY/FAILED.
4102 */
4103 if (rval == QLA_SUCCESS) {
4104 rval = qla83xx_idc_state_handler(vha);
4105
4106 if (rval == QLA_SUCCESS)
4107 ha->flags.nic_core_hung = 0;
4108 __qla83xx_clear_drv_ack(vha);
4109 }
4110
4111exit:
4112 qla83xx_idc_unlock(vha, 0);
4113
4114 ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
4115
4116 return rval;
4117}
4118
4119int
4120qla2xxx_mctp_dump(scsi_qla_host_t *vha)
4121{
4122 struct qla_hw_data *ha = vha->hw;
4123 int rval = QLA_FUNCTION_FAILED;
4124
4125 if (!IS_MCTP_CAPABLE(ha)) {
4126 /* This message can be removed from the final version */
4127 ql_log(ql_log_info, vha, 0x506d,
4128 "This board is not MCTP capable\n");
4129 return rval;
4130 }
4131
4132 if (!ha->mctp_dump) {
4133 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
4134 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
4135
4136 if (!ha->mctp_dump) {
4137 ql_log(ql_log_warn, vha, 0x506e,
4138 "Failed to allocate memory for mctp dump\n");
4139 return rval;
4140 }
4141 }
4142
4143#define MCTP_DUMP_STR_ADDR 0x00000000
4144 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
4145 MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
4146 if (rval != QLA_SUCCESS) {
4147 ql_log(ql_log_warn, vha, 0x506f,
4148 "Failed to capture mctp dump\n");
4149 } else {
4150 ql_log(ql_log_info, vha, 0x5070,
4151 "MCTP dump captured for host (%ld/%p).\n",
4152 vha->host_no, ha->mctp_dump);
4153 ha->mctp_dumped = 1;
4154 }
4155
4156 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
4157 ha->flags.nic_core_reset_hdlr_active = 1;
4158 rval = qla83xx_restart_nic_firmware(vha);
4159 if (rval)
4160 /* NIC Core reset failed. */
4161 ql_log(ql_log_warn, vha, 0x5071,
4162 "Failed to restart nic firmware\n");
4163 else
4164 ql_dbg(ql_dbg_p3p, vha, 0xb084,
4165 "Restarted NIC firmware successfully.\n");
4166 ha->flags.nic_core_reset_hdlr_active = 0;
4167 }
4168
4169 return rval;
4170
4171}
4172
3778 4173 /*
3779 * qla82xx_quiescent_state_cleanup
4174 * qla2x00_quiesce_io
3780 4175 * Description: This function will block the new I/Os
3781 4176 * Its not aborting any I/Os as context
3782 4177 * is not destroyed during quiescence
@@ -3784,20 +4179,20 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3784 4179 * return : void
3785 4180 */
3786 4181 void
3787 qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
4182 qla2x00_quiesce_io(scsi_qla_host_t *vha)
3788 4183 {
3789 4184 struct qla_hw_data *ha = vha->hw;
3790 4185 struct scsi_qla_host *vp;
3791 4186
3792 ql_dbg(ql_dbg_p3p, vha, 0xb002,
3793 "Performing ISP error recovery - ha=%p.\n", ha);
4187 ql_dbg(ql_dbg_dpc, vha, 0x401d,
4188 "Quiescing I/O - ha=%p.\n", ha);
3794 4189
3795 4190 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
3796 4191 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3797 4192 atomic_set(&vha->loop_state, LOOP_DOWN);
3798 4193 qla2x00_mark_all_devices_lost(vha, 0);
3799 4194 list_for_each_entry(vp, &ha->vp_list, list)
3800 qla2x00_mark_all_devices_lost(vha, 0);
4195 qla2x00_mark_all_devices_lost(vp, 0);
3801 4196 } else {
3802 4197 if (!atomic_read(&vha->loop_down_timer))
3803 4198 atomic_set(&vha->loop_down_timer,
@@ -3913,6 +4308,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3913 4308 if (vha->flags.online) {
3914 4309 qla2x00_abort_isp_cleanup(vha);
3915 4310
4311 if (IS_QLA8031(ha)) {
4312 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
4313 "Clearing fcoe driver presence.\n");
4314 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
4315 ql_dbg(ql_dbg_p3p, vha, 0xb073,
4316 "Error while clearing DRV-Presence.\n");
4317 }
4318
3916 4319 if (unlikely(pci_channel_offline(ha->pdev) &&
3917 4320 ha->flags.pci_channel_io_perm_failure)) {
3918 4321 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -4021,6 +4424,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4021 4424 }
4022 4425 spin_unlock_irqrestore(&ha->vport_slock, flags);
4023 4426
4427 if (IS_QLA8031(ha)) {
4428 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
4429 "Setting back fcoe driver presence.\n");
4430 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
4431 ql_dbg(ql_dbg_p3p, vha, 0xb074,
4432 "Error while setting DRV-Presence.\n");
4433 }
4024 4434 } else {
4025 4435 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
4026 4436 __func__);
@@ -5088,6 +5498,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5088 5498 rval = 1;
5089 5499 }
5090 5500
5501 if (IS_T10_PI_CAPABLE(ha))
5502 nv->frame_payload_size &= ~7;
5503
5091 5504 /* Reset Initialization control block */
5092 5505 memset(icb, 0, ha->init_cb_size);
5093 5506
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 6e457643c63..c0462c04c88 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
1 1 /*
2 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
3 * Copyright (c) 2003-2012 QLogic Corporation
4 4 *
5 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 6 */
@@ -57,6 +57,20 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
57 57 return fcp;
58 58 }
59 59
60static inline void
61qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
62{
63 int i;
64
65 if (IS_FWI2_CAPABLE(ha))
66 return;
67
68 for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
69 set_bit(i, ha->loop_id_map);
70 set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
71 set_bit(BROADCAST, ha->loop_id_map);
72}
73
60 74 static inline int
61 75 qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
62 76 {
@@ -69,6 +83,18 @@ qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
69 83 }
70 84
71 85 static inline void
86qla2x00_clear_loop_id(fc_port_t *fcport) {
87 struct qla_hw_data *ha = fcport->vha->hw;
88
89 if (fcport->loop_id == FC_NO_LOOP_ID ||
90 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
91 return;
92
93 clear_bit(fcport->loop_id, ha->loop_id_map);
94 fcport->loop_id = FC_NO_LOOP_ID;
95}
96
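Together with the bitmap-based allocation added to qla2x00_find_new_loop_id earlier in this patch, loop IDs now follow a find-first-zero/set_bit lifecycle with clear_bit on release. A userspace sketch of that lifecycle; a plain byte array and an illustrative size stand in for the driver's loop_id_map bitmap:

#include <stdio.h>

#define MAP_SIZE 128   /* illustrative; the driver uses LOOPID_MAP_SIZE */

static unsigned char map[MAP_SIZE];

/* find_first_zero_bit() + set_bit() analogue */
static int alloc_loop_id(void)
{
	int i;
	for (i = 0; i < MAP_SIZE; i++)
		if (!map[i]) {
			map[i] = 1;
			return i;
		}
	return -1;   /* no free ID: the FC_NO_LOOP_ID case */
}

/* clear_bit() analogue, as in qla2x00_clear_loop_id() */
static void free_loop_id(int id)
{
	if (id >= 0 && id < MAP_SIZE)
		map[id] = 0;
}

int main(void)
{
	int a = alloc_loop_id(), b = alloc_loop_id();
	printf("allocated %d and %d\n", a, b);
	free_loop_id(a);
	printf("after free, next alloc: %d\n", alloc_loop_id());
	return 0;
}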
97static inline void
72 98 qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
73 99 {
74 100 struct dsd_dma *dsd_ptr, *tdsd_ptr;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 70dbf53d9e0..03b75263283 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
1 1 /*
2 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
3 * Copyright (c) 2003-2012 QLogic Corporation
4 4 *
5 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 6 */
@@ -147,13 +147,6 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 147 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 148 uint8_t guard = scsi_host_get_guard(cmd->device->host);
149 149
150 /* We only support T10 DIF right now */
151 if (guard != SHOST_DIX_GUARD_CRC) {
152 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
153 "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
154 return 0;
155 }
156
157 150 /* We always use DIFF Bundling for best performance */
158 151 *fw_prot_opts = 0;
159 152
@@ -172,10 +165,11 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
172 165 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
173 166 break;
174 167 case SCSI_PROT_READ_PASS:
175 *fw_prot_opts |= PO_MODE_DIF_PASS;
176 break;
177 168 case SCSI_PROT_WRITE_PASS:
178 *fw_prot_opts |= PO_MODE_DIF_PASS;
169 if (guard & SHOST_DIX_GUARD_IP)
170 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
171 else
172 *fw_prot_opts |= PO_MODE_DIF_PASS;
179 173 break;
180 174 default: /* Normal Request */
181 175 *fw_prot_opts |= PO_MODE_DIF_PASS;
@@ -821,7 +815,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
821 815 unsigned int protcnt)
822 816 {
823 817 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
824 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
825 818
826 819 switch (scsi_get_prot_type(cmd)) {
827 820 case SCSI_PROT_DIF_TYPE0:
@@ -891,12 +884,6 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
891 884 pkt->ref_tag_mask[3] = 0xff;
892 885 break;
893 886 }
894
895 ql_dbg(ql_dbg_io, vha, 0x3009,
896 "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
897 "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
898 pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
899 scsi_get_prot_type(cmd), cmd);
900 887 }
901 888
902 889 struct qla2_sgx {
@@ -1068,9 +1055,6 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1068 1055 int i;
1069 1056 uint16_t used_dsds = tot_dsds;
1070 1057 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1071 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1072
1073 uint8_t *cp;
1074 1058
1075 1059 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1076 1060 dma_addr_t sle_dma;
@@ -1113,19 +1097,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1113 1097 cur_dsd = (uint32_t *)next_dsd;
1114 1098 }
1115 1099 sle_dma = sg_dma_address(sg);
1116 ql_dbg(ql_dbg_io, vha, 0x300a,
1117 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1118 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
1100
1119 1101 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1120 1102 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1121 1103 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1122 1104 avail_dsds--;
1123 1105
1124 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1125 cp = page_address(sg_page(sg)) + sg->offset;
1126 ql_dbg(ql_dbg_io, vha, 0x300b,
1127 "User data buffer=%p for cmd=%p.\n", cp, cmd);
1128 }
1129 1106 }
1130 1107 /* Null termination */
1131 1108 *cur_dsd++ = 0;
@@ -1148,8 +1125,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1148 1125 struct scsi_cmnd *cmd;
1149 1126 uint32_t *cur_dsd = dsd;
1150 1127 uint16_t used_dsds = tot_dsds;
1151 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1152 uint8_t *cp;
1153 1128
1154 1129 cmd = GET_CMD_SP(sp);
1155 1130 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
@@ -1193,23 +1168,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1193 1168 cur_dsd = (uint32_t *)next_dsd;
1194 1169 }
1195 1170 sle_dma = sg_dma_address(sg);
1196 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1197 ql_dbg(ql_dbg_io, vha, 0x3027,
1198 "%s(): %p, sg_entry %d - "
1199 "addr=0x%x0x%x, len=%d.\n",
1200 __func__, cur_dsd, i,
1201 LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
1202 }
1171
1203 1172 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1204 1173 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1205 1174 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1206 1175
1207 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1208 cp = page_address(sg_page(sg)) + sg->offset;
1209 ql_dbg(ql_dbg_io, vha, 0x3028,
1210 "%s(): Protection Data buffer = %p.\n", __func__,
1211 cp);
1212 }
1213 1176 avail_dsds--;
1214 1177 }
1215 1178 /* Null termination */
@@ -1386,6 +1349,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1386 1349
1387 1350 if (!qla2x00_hba_err_chk_enabled(sp))
1388 1351 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1352 /* HBA error checking enabled */
1353 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1354 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1355 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1356 SCSI_PROT_DIF_TYPE2))
1357 fw_prot_opts |= BIT_10;
1358 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1359 SCSI_PROT_DIF_TYPE3)
1360 fw_prot_opts |= BIT_11;
1361 }
1389 1362
1390 1363 if (!bundling) {
1391 1364 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
@@ -1858,7 +1831,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1858 1831 }
1859 1832 if (index == MAX_OUTSTANDING_COMMANDS) {
1860 1833 ql_log(ql_log_warn, vha, 0x700b,
1861 "No room on oustanding cmd array.\n");
1834 "No room on outstanding cmd array.\n");
1862 1835 goto queuing_error;
1863 1836 }
1864 1837
@@ -2665,3 +2638,201 @@ done:
2665 2638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666 2639 return rval;
2667 2640 }
2641
2642static void
2643qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2644 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2645{
2646 uint16_t avail_dsds;
2647 uint32_t *cur_dsd;
2648 uint32_t req_data_len = 0;
2649 uint32_t rsp_data_len = 0;
2650 struct scatterlist *sg;
2651 int index;
2652 int entry_count = 1;
2653 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2654
2655 /* Update entry type to indicate a bidir command. */
2656 *((uint32_t *)(&cmd_pkt->entry_type)) =
2657 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2658
2659 /* Set the transfer direction; in this case set both flags.
2660 * Also set the BD_WRAP_BACK flag; firmware will take care of
2661 * assigning DID=SID for outgoing pkts.
2662 */
2663 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2664 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2665 cmd_pkt->control_flags =
2666 __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2667 BD_WRAP_BACK);
2668
2669 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2670 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2671 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2672 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2673
2674 vha->bidi_stats.transfer_bytes += req_data_len;
2675 vha->bidi_stats.io_count++;
2676
2677 /* Only one DSD is available in the bidirectional IOCB; the remaining
2678 * DSDs are bundled in continuation IOCBs.
2679 */
2680 avail_dsds = 1;
2681 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2682
2683 index = 0;
2684
2685 for_each_sg(bsg_job->request_payload.sg_list, sg,
2686 bsg_job->request_payload.sg_cnt, index) {
2687 dma_addr_t sle_dma;
2688 cont_a64_entry_t *cont_pkt;
2689
2690 /* Allocate additional continuation packets */
2691 if (avail_dsds == 0) {
2692 /* Continuation type 1 IOCB can accommodate
2693 * 5 DSDs
2694 */
2695 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2696 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2697 avail_dsds = 5;
2698 entry_count++;
2699 }
2700 sle_dma = sg_dma_address(sg);
2701 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2702 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2703 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2704 avail_dsds--;
2705 }
2706 /* For a read request the DSDs always go to a continuation IOCB
2707 * and follow the write DSDs. If there is room on the current IOCB
2708 * they are added to it; otherwise a new continuation IOCB is
2709 * allocated.
2710 */
2711 for_each_sg(bsg_job->reply_payload.sg_list, sg,
2712 bsg_job->reply_payload.sg_cnt, index) {
2713 dma_addr_t sle_dma;
2714 cont_a64_entry_t *cont_pkt;
2715
2716 /* Allocate additional continuation packets */
2717 if (avail_dsds == 0) {
2718 /* Continuation type 1 IOCB can accommodate
2719 * 5 DSDs
2720 */
2721 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2722 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2723 avail_dsds = 5;
2724 entry_count++;
2725 }
2726 sle_dma = sg_dma_address(sg);
2727 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2728 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2729 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2730 avail_dsds--;
2731 }
2732 /* This value should be the same as the number of IOCBs required for this cmd */
2733 cmd_pkt->entry_count = entry_count;
2734}
2735
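Because the bidir command IOCB itself holds a single DSD and each type 1 continuation IOCB holds five, the entry_count accumulated by the two loops above grows by one continuation IOCB per five DSDs beyond the first. A small sketch of the arithmetic qla24xx_calc_iocbs is expected to perform for this layout (an illustration, not the driver function):

#include <stdio.h>

static int iocbs_needed(int tot_dsds)
{
	int iocbs = 1;                            /* the command IOCB, 1 DSD */
	if (tot_dsds > 1)
		iocbs += (tot_dsds - 1 + 4) / 5;  /* ceil((tot_dsds - 1) / 5) */
	return iocbs;
}

int main(void)
{
	int n;
	for (n = 1; n <= 11; n += 5)
		printf("%2d DSDs -> %d IOCBs\n", n, iocbs_needed(n));
	return 0;
}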
2736int
2737qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2738{
2739
2740 struct qla_hw_data *ha = vha->hw;
2741 unsigned long flags;
2742 uint32_t handle;
2743 uint32_t index;
2744 uint16_t req_cnt;
2745 uint16_t cnt;
2746 uint32_t *clr_ptr;
2747 struct cmd_bidir *cmd_pkt = NULL;
2748 struct rsp_que *rsp;
2749 struct req_que *req;
2750 int rval = EXT_STATUS_OK;
2751 device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
2752
2753 rval = QLA_SUCCESS;
2754
2755 rsp = ha->rsp_q_map[0];
2756 req = vha->req;
2757
2758 /* Send marker if required */
2759 if (vha->marker_needed != 0) {
2760 if (qla2x00_marker(vha, req,
2761 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2762 return EXT_STATUS_MAILBOX;
2763 vha->marker_needed = 0;
2764 }
2765
2766 /* Acquire ring specific lock */
2767 spin_lock_irqsave(&ha->hardware_lock, flags);
2768
2769 /* Check for room in outstanding command list. */
2770 handle = req->current_outstanding_cmd;
2771 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2772 handle++;
2773 if (handle == MAX_OUTSTANDING_COMMANDS)
2774 handle = 1;
2775 if (!req->outstanding_cmds[handle])
2776 break;
2777 }
2778
2779 if (index == MAX_OUTSTANDING_COMMANDS) {
2780 rval = EXT_STATUS_BUSY;
2781 goto queuing_error;
2782 }
2783
2784 /* Calculate number of IOCB required */
2785 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2786
2787 /* Check for room on request queue. */
2788 if (req->cnt < req_cnt + 2) {
2789 if (ha->mqenable)
2790 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2791 else if (IS_QLA82XX(ha))
2792 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2793 else if (IS_FWI2_CAPABLE(ha))
2794 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2795 else
2796 cnt = qla2x00_debounce_register(
2797 ISP_REQ_Q_OUT(ha, &reg->isp));
2798
2799 if (req->ring_index < cnt)
2800 req->cnt = cnt - req->ring_index;
2801 else
2802 req->cnt = req->length -
2803 (req->ring_index - cnt);
2804 }
2805 if (req->cnt < req_cnt + 2) {
2806 rval = EXT_STATUS_BUSY;
2807 goto queuing_error;
2808 }
2809
2810 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2811 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2812
2813 /* Zero out remaining portion of packet. */
2814 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2815 clr_ptr = (uint32_t *)cmd_pkt + 2;
2816 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2817
2818 /* Set NPORT-ID (of vha)*/
2819 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2820 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2821 cmd_pkt->port_id[1] = vha->d_id.b.area;
2822 cmd_pkt->port_id[2] = vha->d_id.b.domain;
2823
2824 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2825 cmd_pkt->entry_status = (uint8_t) rsp->id;
2826 /* Build command packet. */
2827 req->current_outstanding_cmd = handle;
2828 req->outstanding_cmds[handle] = sp;
2829 sp->handle = handle;
2830 req->cnt -= req_cnt;
2831
2832 /* Send the command to the firmware */
2833 wmb();
2834 qla2x00_start_iocbs(vha, req);
2835queuing_error:
2836 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2837 return rval;
2838}
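The "room on request queue" step above is standard circular-ring arithmetic: after re-reading the hardware out pointer, the free count is either the simple difference or the wrapped-around remainder of the ring length. A standalone sketch of that computation:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the req->cnt update in qla2x00_start_bidir(). */
static uint16_t ring_free(uint16_t length, uint16_t ring_index, uint16_t out)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}

int main(void)
{
	/* Ring of 8 entries, producer at 6, consumer at 2: 4 entries free. */
	printf("free=%u\n", (unsigned)ring_free(8, 6, 2));
	return 0;
}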
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6f67a9d4998..5733811ce8e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
1 1 /*
2 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
3 * Copyright (c) 2003-2012 QLogic Corporation
4 4 *
5 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 6 */
@@ -294,6 +294,11 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
294 294 "%04x %04x %04x %04x %04x %04x %04x.\n",
295 295 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
296 296 mb[4], mb[5], mb[6]);
297 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
298 vha->hw->flags.idc_compl_status = 1;
299 if (vha->hw->notify_dcbx_comp)
300 complete(&vha->hw->dcbx_comp);
301 }
297 302
298 303 /* Acknowledgement needed? [Notify && non-zero timeout]. */
299 304 timeout = (descr >> 8) & 0xf;
@@ -332,6 +337,166 @@ qla2x00_get_link_speed_str(struct qla_hw_data *ha)
332 337 return link_speed;
333 338 }
334 339
340void
341qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
342{
343 struct qla_hw_data *ha = vha->hw;
344
345 /*
346 * 8200 AEN Interpretation:
347 * mb[0] = AEN code
348 * mb[1] = AEN Reason code
349 * mb[2] = LSW of Peg-Halt Status-1 Register
350 * mb[6] = MSW of Peg-Halt Status-1 Register
351 * mb[3] = LSW of Peg-Halt Status-2 register
352 * mb[7] = MSW of Peg-Halt Status-2 register
353 * mb[4] = IDC Device-State Register value
354 * mb[5] = IDC Driver-Presence Register value
355 */
356 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
357 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
358 mb[0], mb[1], mb[2], mb[6]);
359 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
360 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
361 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
362
363 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
364 IDC_HEARTBEAT_FAILURE)) {
365 ha->flags.nic_core_hung = 1;
366 ql_log(ql_log_warn, vha, 0x5060,
367 "83XX: F/W Error Reported: Check if reset required.\n");
368
369 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
370 uint32_t protocol_engine_id, fw_err_code, err_level;
371
372 /*
373 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
374 * - PEG-Halt Status-1 Register:
375 * (LSW = mb[2], MSW = mb[6])
376 * Bits 0-7 = protocol-engine ID
377 * Bits 8-28 = f/w error code
378 * Bits 29-31 = Error-level
379 * Error-level 0x1 = Non-Fatal error
380 * Error-level 0x2 = Recoverable Fatal error
381 * Error-level 0x4 = UnRecoverable Fatal error
382 * - PEG-Halt Status-2 Register:
383 * (LSW = mb[3], MSW = mb[7])
384 */
385 protocol_engine_id = (mb[2] & 0xff);
386 fw_err_code = (((mb[2] & 0xff00) >> 8) |
387 ((mb[6] & 0x1fff) << 8));
388 err_level = ((mb[6] & 0xe000) >> 13);
389 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
390 "Register: protocol_engine_id=0x%x "
391 "fw_err_code=0x%x err_level=0x%x.\n",
392 protocol_engine_id, fw_err_code, err_level);
393 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
394 "Register: 0x%x%x.\n", mb[7], mb[3]);
395 if (err_level == ERR_LEVEL_NON_FATAL) {
396 ql_log(ql_log_warn, vha, 0x5063,
397 "Not a fatal error, f/w has recovered "
398 "itself.\n");
399 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
400 ql_log(ql_log_fatal, vha, 0x5064,
401 "Recoverable Fatal error: Chip reset "
402 "required.\n");
403 qla83xx_schedule_work(vha,
404 QLA83XX_NIC_CORE_RESET);
405 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
406 ql_log(ql_log_fatal, vha, 0x5065,
407 "Unrecoverable Fatal error: Set FAILED "
408 "state, reboot required.\n");
409 qla83xx_schedule_work(vha,
410 QLA83XX_NIC_CORE_UNRECOVERABLE);
411 }
412 }
413
414 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
415 uint16_t peg_fw_state, nw_interface_link_up;
416 uint16_t nw_interface_signal_detect, sfp_status;
417 uint16_t htbt_counter, htbt_monitor_enable;
418 uint16_t sfp_additional_info, sfp_multirate;
419 uint16_t sfp_tx_fault, link_speed, dcbx_status;
420
421 /*
422 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
423 * - PEG-to-FC Status Register:
424 * (LSW = mb[2], MSW = mb[6])
425 * Bits 0-7 = Peg-Firmware state
426 * Bit 8 = N/W Interface Link-up
427 * Bit 9 = N/W Interface signal detected
428 * Bits 10-11 = SFP Status
429 * SFP Status 0x0 = SFP+ transceiver not expected
430 * SFP Status 0x1 = SFP+ transceiver not present
431 * SFP Status 0x2 = SFP+ transceiver invalid
432 * SFP Status 0x3 = SFP+ transceiver present and
433 * valid
434 * Bits 12-14 = Heartbeat Counter
435 * Bit 15 = Heartbeat Monitor Enable
436 * Bits 16-17 = SFP Additional Info
437 * SFP info 0x0 = Unrecognized transceiver for
438 * Ethernet
439 * SFP info 0x1 = SFP+ brand validation failed
440 * SFP info 0x2 = SFP+ speed validation failed
441 * SFP info 0x3 = SFP+ access error
442 * Bit 18 = SFP Multirate
443 * Bit 19 = SFP Tx Fault
444 * Bits 20-22 = Link Speed
445 * Bits 23-27 = Reserved
446 * Bits 28-30 = DCBX Status
447 * DCBX Status 0x0 = DCBX Disabled
448 * DCBX Status 0x1 = DCBX Enabled
449 * DCBX Status 0x2 = DCBX Exchange error
450 * Bit 31 = Reserved
451 */
452 peg_fw_state = (mb[2] & 0x00ff);
453 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
454 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
455 sfp_status = ((mb[2] & 0x0c00) >> 10);
456 htbt_counter = ((mb[2] & 0x7000) >> 12);
457 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
458 sfp_additional_info = (mb[6] & 0x0003);
459 sfp_multirate = ((mb[6] & 0x0004) >> 2);
460 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
461 link_speed = ((mb[6] & 0x0070) >> 4);
462 dcbx_status = ((mb[6] & 0x7000) >> 12);
463
464 ql_log(ql_log_warn, vha, 0x5066,
465 "Peg-to-Fc Status Register:\n"
466 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
467 "nw_interface_signal_detect=0x%x"
468 "\nsfp_status=0x%x.\n", peg_fw_state,
469 nw_interface_link_up, nw_interface_signal_detect,
470 sfp_status);
471 ql_log(ql_log_warn, vha, 0x5067,
472 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
473 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
474 htbt_counter, htbt_monitor_enable,
475 sfp_additional_info, sfp_multirate);
476 ql_log(ql_log_warn, vha, 0x5068,
477 "sfp_tx_fault=0x%x, link_speed=0x%x, "
478 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
479 dcbx_status);
480
481 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
482 }
483
484 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
485 ql_log(ql_log_warn, vha, 0x5069,
486 "Heartbeat Failure encountered, chip reset "
487 "required.\n");
488
489 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
490 }
491 }
492
493 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
494 ql_log(ql_log_info, vha, 0x506a,
495 "IDC Device-State changed = 0x%x.\n", mb[4]);
496 qla83xx_schedule_work(vha, MBA_IDC_AEN);
497 }
498}
499
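Per the layout documented in the handler above, joining the two mailbox halves gives a 32-bit Peg-Halt Status-1 value whose fields fall out with plain shifts and masks. A standalone decode sketch with made-up mailbox values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t mb2 = 0x1205, mb6 = 0x4003;   /* example LSW/MSW values */
	uint32_t status1 = ((uint32_t)mb6 << 16) | mb2;

	/* Bits 0-7 engine ID, 8-28 f/w error code, 29-31 error level. */
	printf("engine=0x%x err=0x%x level=0x%x\n",
	    (unsigned)(status1 & 0xff),
	    (unsigned)((status1 >> 8) & 0x1fffff),
	    (unsigned)(status1 >> 29));
	return 0;
}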
335 500 /**
336 501 * qla2x00_async_event() - Process asynchronous events.
337 502 * @ha: SCSI driver HA context
@@ -681,8 +846,7 @@ skip_rio:
681 846 * it. Otherwise ignore it and Wait for RSCN to come in.
682 847 */
683 848 atomic_set(&vha->loop_down_timer, 0);
684 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
685 atomic_read(&vha->loop_state) != LOOP_DEAD) {
849 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
686 850 ql_dbg(ql_dbg_async, vha, 0x5011,
687 851 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
688 852 mb[1], mb[2], mb[3]);
@@ -822,11 +986,28 @@ skip_rio:
822 986 "FCF Configuration Error -- %04x %04x %04x.\n",
823 987 mb[1], mb[2], mb[3]);
824 988 break;
825 case MBA_IDC_COMPLETE:
826 989 case MBA_IDC_NOTIFY:
990 /* See if we need to quiesce any I/O */
991 if (IS_QLA8031(vha->hw))
992 if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
993 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
994 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
995 qla2xxx_wake_dpc(vha);
996 }
997 case MBA_IDC_COMPLETE:
827 998 case MBA_IDC_TIME_EXT:
828 qla81xx_idc_event(vha, mb[0], mb[1]);
999 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
1000 qla81xx_idc_event(vha, mb[0], mb[1]);
829 1001 break;
1002
1003 case MBA_IDC_AEN:
1004 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1005 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1006 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1007 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1008 qla83xx_handle_8200_aen(vha, mb);
1009 break;
1010
830 1011 default:
831 1012 ql_dbg(ql_dbg_async, vha, 0x5057,
832 1013 "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1414,7 +1595,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1414 1595
1415 1596 struct scsi_dif_tuple {
1416 1597 __be16 guard; /* Checksum */
1417 __be16 app_tag; /* APPL identifer */
1598 __be16 app_tag; /* APPL identifier */
1418 1599 __be32 ref_tag; /* Target LBA or indirect LBA */
1419 1600 };
1420 1601
@@ -1546,6 +1727,149 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1546 1727 return 1;
1547 1728 }
1548 1729
1730static void
1731qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1732 struct req_que *req, uint32_t index)
1733{
1734 struct qla_hw_data *ha = vha->hw;
1735 srb_t *sp;
1736 uint16_t comp_status;
1737 uint16_t scsi_status;
1738 uint16_t thread_id;
1739 uint32_t rval = EXT_STATUS_OK;
1740 struct fc_bsg_job *bsg_job = NULL;
1741 sts_entry_t *sts;
1742 struct sts_entry_24xx *sts24;
1743 sts = (sts_entry_t *) pkt;
1744 sts24 = (struct sts_entry_24xx *) pkt;
1745
1746 /* Validate handle. */
1747 if (index >= MAX_OUTSTANDING_COMMANDS) {
1748 ql_log(ql_log_warn, vha, 0x70af,
1749 "Invalid SCSI completion handle 0x%x.\n", index);
1750 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1751 return;
1752 }
1753
1754 sp = req->outstanding_cmds[index];
1755 if (sp) {
1756 /* Free outstanding command slot. */
1757 req->outstanding_cmds[index] = NULL;
1758 bsg_job = sp->u.bsg_job;
1759 } else {
1760 ql_log(ql_log_warn, vha, 0x70b0,
1761 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1762 req->id, index);
1763
1764 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1765 return;
1766 }
1767
1768 if (IS_FWI2_CAPABLE(ha)) {
1769 comp_status = le16_to_cpu(sts24->comp_status);
1770 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1771 } else {
1772 comp_status = le16_to_cpu(sts->comp_status);
1773 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1774 }
1775
1776 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1777 switch (comp_status) {
1778 case CS_COMPLETE:
1779 if (scsi_status == 0) {
1780 bsg_job->reply->reply_payload_rcv_len =
1781 bsg_job->reply_payload.payload_len;
1782 rval = EXT_STATUS_OK;
1783 }
1784 goto done;
1785
1786 case CS_DATA_OVERRUN:
1787 ql_dbg(ql_dbg_user, vha, 0x70b1,
1788 "Command completed with data overrun thread_id=%d\n",
1789 thread_id);
1790 rval = EXT_STATUS_DATA_OVERRUN;
1791 break;
1792
1793 case CS_DATA_UNDERRUN:
1794 ql_dbg(ql_dbg_user, vha, 0x70b2,
1795 "Command completed with data underrun thread_id=%d\n",
1796 thread_id);
1797 rval = EXT_STATUS_DATA_UNDERRUN;
1798 break;
1799 case CS_BIDIR_RD_OVERRUN:
1800 ql_dbg(ql_dbg_user, vha, 0x70b3,
1801 "Command completed with read data overrun thread_id=%d\n",
1802 thread_id);
1803 rval = EXT_STATUS_DATA_OVERRUN;
1804 break;
1805
1806 case CS_BIDIR_RD_WR_OVERRUN:
1807 ql_dbg(ql_dbg_user, vha, 0x70b4,
1808 "Command completed with read and write data overrun "
1809 "thread_id=%d\n", thread_id);
1810 rval = EXT_STATUS_DATA_OVERRUN;
1811 break;
1812
1813 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1814 ql_dbg(ql_dbg_user, vha, 0x70b5,
1815 "Command completed with read data overrun and write data "
1816 "underrun thread_id=%d\n", thread_id);
1817 rval = EXT_STATUS_DATA_OVERRUN;
1818 break;
1819
1820 case CS_BIDIR_RD_UNDERRUN:
1821 ql_dbg(ql_dbg_user, vha, 0x70b6,
1822 "Command completed with read data underrun "
1823 "thread_id=%d\n", thread_id);
1824 rval = EXT_STATUS_DATA_UNDERRUN;
1825 break;
1826
1827 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1828 ql_dbg(ql_dbg_user, vha, 0x70b7,
1829 "Command completed with read data underrun and write data "
1830 "overrun thread_id=%d\n", thread_id);
1831 rval = EXT_STATUS_DATA_UNDERRUN;
1832 break;
1833
1834 case CS_BIDIR_RD_WR_UNDERRUN:
1835 ql_dbg(ql_dbg_user, vha, 0x70b8,
1836 "Command completed with read and write data underrun "
1837 "thread_id=%d\n", thread_id);
1838 rval = EXT_STATUS_DATA_UNDERRUN;
1839 break;
1840
1841 case CS_BIDIR_DMA:
1842 ql_dbg(ql_dbg_user, vha, 0x70b9,
1843 "Command completed with data DMA error thread_id=%d\n",
1844 thread_id);
1845 rval = EXT_STATUS_DMA_ERR;
1846 break;
1847
1848 case CS_TIMEOUT:
1849 ql_dbg(ql_dbg_user, vha, 0x70ba,
1850 "Command completed with timeout thread_id=%d\n",
1851 thread_id);
1852 rval = EXT_STATUS_TIMEOUT;
1853 break;
1854 default:
1855 ql_dbg(ql_dbg_user, vha, 0x70bb,
1856 "Command completed with completion status=0x%x "
1857 "thread_id=%d\n", comp_status, thread_id);
1858 rval = EXT_STATUS_ERR;
1859 break;
1860 }
1861 bsg_job->reply->reply_payload_rcv_len = 0;
1862
1863done:
1864 /* Return the vendor specific reply to API */
1865 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1866 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1867 /* Always return DID_OK; bsg will send the vendor-specific response
1868 * in this case only. */
1869 sp->done(vha, sp, (DID_OK << 16));
1870
1871}
1872
1549 1873 /**
1550 1874 * qla2x00_status_entry() - Process a Status IOCB entry.
1551 1875 * @ha: SCSI driver HA context
@@ -1573,12 +1897,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1573 1897 struct req_que *req;
1574 1898 int logit = 1;
1575 1899 int res = 0;
1900 uint16_t state_flags = 0;
1576 1901
1577 1902 sts = (sts_entry_t *) pkt;
1578 1903 sts24 = (struct sts_entry_24xx *) pkt;
1579 1904 if (IS_FWI2_CAPABLE(ha)) {
1580 1905 comp_status = le16_to_cpu(sts24->comp_status);
1581 1906 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1907 state_flags = le16_to_cpu(sts24->state_flags);
1582 1908 } else {
1583 1909 comp_status = le16_to_cpu(sts->comp_status);
1584 1910 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
@@ -1587,17 +1913,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1587 1913 que = MSW(sts->handle);
1588 1914 req = ha->req_q_map[que];
1589 1915
1590 /* Fast path completion. */
1591 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1592 qla2x00_process_completed_request(vha, req, handle);
1593
1594 return;
1595 }
1596
1597 1916 /* Validate handle. */
1598 1917 if (handle < MAX_OUTSTANDING_COMMANDS) {
1599 1918 sp = req->outstanding_cmds[handle];
1600 req->outstanding_cmds[handle] = NULL;
1601 1919 } else
1602 1920 sp = NULL;
1603 1921
@@ -1612,6 +1930,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1612 1930 qla2xxx_wake_dpc(vha);
1613 1931 return;
1614 1932 }
1933
1934 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1935 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1936 return;
1937 }
1938
1939 /* Fast path completion. */
1940 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1941 qla2x00_process_completed_request(vha, req, handle);
1942
1943 return;
1944 }
1945
1946 req->outstanding_cmds[handle] = NULL;
1615 1947 cp = GET_CMD_SP(sp);
1616 1948 if (cp == NULL) {
1617 1949 ql_dbg(ql_dbg_io, vha, 0x3018,
@@ -1830,7 +2162,21 @@ check_scsi_status:
1830 2162
1831 2163 case CS_DIF_ERROR:
1832 2164 logit = qla2x00_handle_dif_error(sp, sts24);
2165 res = cp->result;
1833 break; 2166 break;
2167
2168 case CS_TRANSPORT:
2169 res = DID_ERROR << 16;
2170
2171 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2172 break;
2173
2174 if (state_flags & BIT_4)
2175 scmd_printk(KERN_WARNING, cp,
2176 "Unsupported device '%s' found.\n",
2177 cp->device->vendor);
2178 break;
2179
1834 2180 default:
1835 2181 res = DID_ERROR << 16;
1836 2182 break;
@@ -2150,7 +2496,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
2150 2496 unsigned long iter;
2151 2497 uint32_t stat;
2152 2498 uint32_t hccr;
2153 uint16_t mb[4];
2499 uint16_t mb[8];
2154 2500 struct rsp_que *rsp;
2155 2501 unsigned long flags;
2156 2502
@@ -2191,29 +2537,29 @@ qla24xx_intr_handler(int irq, void *dev_id)
2191 2537 break;
2192 2538
2193 2539 switch (stat & 0xff) {
2194 case 0x1:
2195 case 0x2:
2196 case 0x10:
2197 case 0x11:
2540 case INTR_ROM_MB_SUCCESS:
2541 case INTR_ROM_MB_FAILED:
2542 case INTR_MB_SUCCESS:
2543 case INTR_MB_FAILED:
2198 2544 qla24xx_mbx_completion(vha, MSW(stat));
2199 2545 status |= MBX_INTERRUPT;
2200 2546
2201 2547 break;
2202 case 0x12:
2548 case INTR_ASYNC_EVENT:
2203 2549 mb[0] = MSW(stat);
2204 2550 mb[1] = RD_REG_WORD(&reg->mailbox1);
2205 2551 mb[2] = RD_REG_WORD(&reg->mailbox2);
2206 2552 mb[3] = RD_REG_WORD(&reg->mailbox3);
2207 2553 qla2x00_async_event(vha, rsp, mb);
2208 2554 break;
2209 case 0x13:
2210 case 0x14:
2555 case INTR_RSP_QUE_UPDATE:
2556 case INTR_RSP_QUE_UPDATE_83XX:
2211 2557 qla24xx_process_response_queue(vha, rsp);
2212 2558 break;
2213 case 0x1C: /* ATIO queue updated */
2559 case INTR_ATIO_QUE_UPDATE:
2214 2560 qlt_24xx_process_atio_queue(vha);
2215 2561 break;
2216 case 0x1D: /* ATIO and response queues updated */
2562 case INTR_ATIO_RSP_QUE_UPDATE:
2217 2563 qlt_24xx_process_atio_queue(vha);
2218 2564 qla24xx_process_response_queue(vha, rsp);
2219 2565 break;
@@ -2224,6 +2570,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2224 2570 }
2225 2571 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2226 2572 RD_REG_DWORD_RELAXED(&reg->hccr);
2573 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2574 ndelay(3500);
2227 2575 }
2228 2576 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2229 2577
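The interrupt handlers in this patch replace the old magic status codes with named constants; going purely by the old case labels still visible in these hunks, the definitions (presumably added to a driver header by this series) would be:

#define INTR_ROM_MB_SUCCESS      0x1
#define INTR_ROM_MB_FAILED       0x2
#define INTR_MB_SUCCESS          0x10
#define INTR_MB_FAILED           0x11
#define INTR_ASYNC_EVENT         0x12
#define INTR_RSP_QUE_UPDATE      0x13
#define INTR_RSP_QUE_UPDATE_83XX 0x14
#define INTR_ATIO_QUE_UPDATE     0x1C
#define INTR_ATIO_RSP_QUE_UPDATE 0x1D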
@@ -2306,7 +2654,7 @@ qla24xx_msix_default(int irq, void *dev_id)
2306 2654 int status;
2307 2655 uint32_t stat;
2308 2656 uint32_t hccr;
2309 uint16_t mb[4];
2657 uint16_t mb[8];
2310 2658 unsigned long flags;
2311 2659
2312 2660 rsp = (struct rsp_que *) dev_id;
@@ -2342,29 +2690,29 @@ qla24xx_msix_default(int irq, void *dev_id)
2342 2690 break;
2343 2691
2344 2692 switch (stat & 0xff) {
2345 case 0x1:
2346 case 0x2:
2347 case 0x10:
2348 case 0x11:
2693 case INTR_ROM_MB_SUCCESS:
2694 case INTR_ROM_MB_FAILED:
2695 case INTR_MB_SUCCESS:
2696 case INTR_MB_FAILED:
2349 2697 qla24xx_mbx_completion(vha, MSW(stat));
2350 2698 status |= MBX_INTERRUPT;
2351 2699
2352 2700 break;
2353 case 0x12:
2701 case INTR_ASYNC_EVENT:
2354 2702 mb[0] = MSW(stat);
2355 2703 mb[1] = RD_REG_WORD(&reg->mailbox1);
2356 2704 mb[2] = RD_REG_WORD(&reg->mailbox2);
2357 2705 mb[3] = RD_REG_WORD(&reg->mailbox3);
2358 2706 qla2x00_async_event(vha, rsp, mb);
2359 2707 break;
2360 case 0x13:
2361 case 0x14:
2708 case INTR_RSP_QUE_UPDATE:
2709 case INTR_RSP_QUE_UPDATE_83XX:
2362 2710 qla24xx_process_response_queue(vha, rsp);
2363 2711 break;
2364 case 0x1C: /* ATIO queue updated */
2712 case INTR_ATIO_QUE_UPDATE:
2365 2713 qlt_24xx_process_atio_queue(vha);
2366 2714 break;
2367 case 0x1D: /* ATIO and response queues updated */
2715 case INTR_ATIO_RSP_QUE_UPDATE:
2368 2716 qlt_24xx_process_atio_queue(vha);
2369 2717 qla24xx_process_response_queue(vha, rsp);
@@ -2570,7 +2918,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2570 2918 skip_msix:
2571 2919
2572 2920 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2573 !IS_QLA8001(ha))
2921 !IS_QLA8001(ha) && !IS_QLA82XX(ha))
2574 2922 goto skip_msi;
2575 2923
2576 2924 ret = pci_enable_msi(ha->pdev);
@@ -2581,6 +2929,11 @@ skip_msix:
2581 2929 } else
2582 2930 ql_log(ql_log_warn, vha, 0x0039,
2583 2931 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2932
2933 /* Skip INTx on ISP82xx. */
2934 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2935 return QLA_FUNCTION_FAILED;
2936
2584 2937 skip_msi:
2585 2938
2586 2939 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -2595,21 +2948,9 @@ skip_msi:
2595 2948
2596 2949 clear_risc_ints:
2597 2950
2598 /*
2599 * FIXME: Noted that 8014s were being dropped during NK testing.
2600 * Timing deltas during MSI-X/INTa transitions?
2601 */
2602 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
2603 goto fail;
2604 2951 spin_lock_irq(&ha->hardware_lock);
2605 if (IS_FWI2_CAPABLE(ha)) {
2952 if (!IS_FWI2_CAPABLE(ha))
2606 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2607 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2608 } else {
2609 2953 WRT_REG_WORD(&reg->isp.semaphore, 0);
2610 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2611 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2612 }
2613 2954 spin_unlock_irq(&ha->hardware_lock);
2614 2955
2615 2956 fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d5ce92c0a8f..18c509fae55 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
1 1 /*
2 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation
3 * Copyright (c) 2003-2012 QLogic Corporation
4 4 *
5 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 6 */
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
75 75 return QLA_FUNCTION_TIMEOUT;
76 76 }
77 77
78 if (ha->flags.isp82xx_fw_hung) {
78 if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
79 79 /* Setting Link-Down error */
80 80 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
81 81 ql_log(ql_log_warn, vha, 0x1004,
@@ -232,7 +232,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
232 232 ha->flags.mbox_int = 0;
233 233 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
234 234
235 if (ha->flags.isp82xx_fw_hung) {
235 if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
236 236 ha->flags.mbox_busy = 0;
237 237 /* Setting Link-Down error */
238 238 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -369,7 +369,7 @@ premature_exit:
369 369
370 370 mbx_done:
371 371 if (rval) {
372 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
372 ql_log(ql_log_warn, base_vha, 0x1020,
373 373 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
374 374 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
375 375 } else {
@@ -533,7 +533,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
533 533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
534 534 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
535 535 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
536 if (IS_QLA83XX(vha->hw))
536 if (IS_FWI2_CAPABLE(ha))
537 537 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
538 538 mcp->flags = 0;
539 539 mcp->tov = MBX_TOV_SECONDS;
@@ -559,18 +559,16 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
559 559 ha->phy_version[1] = mcp->mb[9] >> 8;
560 560 ha->phy_version[2] = mcp->mb[9] & 0xff;
561 561 }
562 if (IS_QLA83XX(ha)) {
563 if (mcp->mb[6] & BIT_15) {
564 ha->fw_attributes_h = mcp->mb[15];
565 ha->fw_attributes_ext[0] = mcp->mb[16];
566 ha->fw_attributes_ext[1] = mcp->mb[17];
567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
568 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
569 __func__, mcp->mb[15], mcp->mb[6]);
570 } else
571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
572 "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
573 __func__, mcp->mb[6]);
562 if (IS_FWI2_CAPABLE(ha)) {
563 ha->fw_attributes_h = mcp->mb[15];
564 ha->fw_attributes_ext[0] = mcp->mb[16];
565 ha->fw_attributes_ext[1] = mcp->mb[17];
566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
567 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
568 __func__, mcp->mb[15], mcp->mb[6]);
569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
570 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
571 __func__, mcp->mb[17], mcp->mb[16]);
574 572 }
575 573
576 574 failed:
@@ -3408,7 +3406,6 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3408 3406
3409 3407 return rval;
3410 3408 }
3411
3412 3409 /* 84XX Support **************************************************************/
3413 3410
3414 3411 struct cs84xx_mgmt_cmd {
@@ -4428,7 +4425,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4428 4425 "Entered %s.\n", __func__);
4429 4426
4430 4427 /* Integer part */
4431 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4428 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
4429 BIT_13|BIT_12|BIT_0);
4432 4430 if (rval != QLA_SUCCESS) {
4433 4431 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4434 4432 ha->flags.thermal_supported = 0;
@@ -4437,7 +4435,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4437 4435 *temp = byte;
4438 4436
4439 4437 /* Fraction part */
4440 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4438 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
4439 BIT_13|BIT_12|BIT_0);
4441 4440 if (rval != QLA_SUCCESS) {
4442 4441 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4443 4442 ha->flags.thermal_supported = 0;
@@ -4741,7 +4740,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4741} 4740}
4742 4741
4743int 4742int
4744qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) 4743qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4745{ 4744{
4746 int rval; 4745 int rval;
4747 struct qla_hw_data *ha = vha->hw; 4746 struct qla_hw_data *ha = vha->hw;
@@ -4814,3 +4813,186 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4814 return rval; 4813 return rval;
4815} 4814}
4816 4815
4816int
4817qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
4818{
4819 int rval;
4820 mbx_cmd_t mc;
4821 mbx_cmd_t *mcp = &mc;
4822 struct qla_hw_data *ha = vha->hw;
4823 unsigned long retry_max_time = jiffies + (2 * HZ);
4824
4825 if (!IS_QLA83XX(ha))
4826 return QLA_FUNCTION_FAILED;
4827
4828 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
4829
4830retry_rd_reg:
4831 mcp->mb[0] = MBC_READ_REMOTE_REG;
4832 mcp->mb[1] = LSW(reg);
4833 mcp->mb[2] = MSW(reg);
4834 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4835 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4836 mcp->tov = MBX_TOV_SECONDS;
4837 mcp->flags = 0;
4838 rval = qla2x00_mailbox_command(vha, mcp);
4839
4840 if (rval != QLA_SUCCESS) {
4841 ql_dbg(ql_dbg_mbx, vha, 0x114c,
4842 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4843 rval, mcp->mb[0], mcp->mb[1]);
4844 } else {
4845 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
4846 if (*data == QLA8XXX_BAD_VALUE) {
4847 /*
4848 * During soft-reset CAMRAM register reads might
4849 * return 0xbad0bad0. So retry for MAX of 2 sec
4850 * while reading camram registers.
4851 */
4852 if (time_after(jiffies, retry_max_time)) {
4853 ql_dbg(ql_dbg_mbx, vha, 0x1141,
4854 "Failure to read CAMRAM register. "
4855 "data=0x%x.\n", *data);
4856 return QLA_FUNCTION_FAILED;
4857 }
4858 msleep(100);
4859 goto retry_rd_reg;
4860 }
4861 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
4862 }
4863
4864 return rval;
4865}
4866
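The 0xbad0bad0 handling above is the standard jiffies-deadline polling idiom; a minimal standalone sketch of the same pattern, with a hypothetical rd_hw_reg() standing in for the real mailbox read:

static int rd_reg_stable(uint32_t reg, uint32_t *data)
{
	/* CAMRAM reads can return the 0xbad0bad0 filler while a
	 * soft-reset is in flight; poll until a real value shows
	 * up, giving up after a two second deadline. */
	unsigned long deadline = jiffies + 2 * HZ;

	for (;;) {
		*data = rd_hw_reg(reg);		/* hypothetical reader */
		if (*data != QLA8XXX_BAD_VALUE)
			return 0;		/* stable value read */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* never settled */
		msleep(100);
	}
}
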
4867int
4868qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
4869{
4870 int rval;
4871 mbx_cmd_t mc;
4872 mbx_cmd_t *mcp = &mc;
4873 struct qla_hw_data *ha = vha->hw;
4874
4875 if (!IS_QLA83XX(ha))
4876 return QLA_FUNCTION_FAILED;
4877
4878 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
4879
4880 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
4881 mcp->out_mb = MBX_0;
4882 mcp->in_mb = MBX_1|MBX_0;
4883 mcp->tov = MBX_TOV_SECONDS;
4884 mcp->flags = 0;
4885 rval = qla2x00_mailbox_command(vha, mcp);
4886
4887 if (rval != QLA_SUCCESS) {
4888 ql_dbg(ql_dbg_mbx, vha, 0x1144,
4889 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4890 rval, mcp->mb[0], mcp->mb[1]);
4891 ha->isp_ops->fw_dump(vha, 0);
4892 } else {
4893 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
4894 }
4895
4896 return rval;
4897}
4898
4899int
4900qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
4901 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
4902{
4903 int rval;
4904 mbx_cmd_t mc;
4905 mbx_cmd_t *mcp = &mc;
4906 uint8_t subcode = (uint8_t)options;
4907 struct qla_hw_data *ha = vha->hw;
4908
4909 if (!IS_QLA8031(ha))
4910 return QLA_FUNCTION_FAILED;
4911
4912 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
4913
4914 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
4915 mcp->mb[1] = options;
4916 mcp->out_mb = MBX_1|MBX_0;
4917 if (subcode & BIT_2) {
4918 mcp->mb[2] = LSW(start_addr);
4919 mcp->mb[3] = MSW(start_addr);
4920 mcp->mb[4] = LSW(end_addr);
4921 mcp->mb[5] = MSW(end_addr);
4922 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
4923 }
4924 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4925 if (!(subcode & (BIT_2 | BIT_5)))
4926 mcp->in_mb |= MBX_4|MBX_3;
4927 mcp->tov = MBX_TOV_SECONDS;
4928 mcp->flags = 0;
4929 rval = qla2x00_mailbox_command(vha, mcp);
4930
4931 if (rval != QLA_SUCCESS) {
4932 ql_dbg(ql_dbg_mbx, vha, 0x1147,
4933 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
4934 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
4935 mcp->mb[4]);
4936 ha->isp_ops->fw_dump(vha, 0);
4937 } else {
4938 if (subcode & BIT_5)
4939 *sector_size = mcp->mb[1];
4940 else if (subcode & (BIT_6 | BIT_7)) {
4941 ql_dbg(ql_dbg_mbx, vha, 0x1148,
4942 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
4943 } else if (subcode & (BIT_3 | BIT_4)) {
4944 ql_dbg(ql_dbg_mbx, vha, 0x1149,
4945 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
4946 }
4947 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
4948 }
4949
4950 return rval;
4951}
4952
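Going by the success path above, a caller that wants the flash sector size passes a BIT_5 subcode and reads the result back through *sector_size. A hedged usage sketch (the 0x70ff message id is made up for illustration):

	uint16_t sector_size = 0;

	if (qla83xx_access_control(vha, BIT_5, 0, 0, &sector_size) ==
	    QLA_SUCCESS)
		ql_dbg(ql_dbg_user, vha, 0x70ff,	/* hypothetical id */
		    "Flash sector size: %u.\n", sector_size);
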
4953int
4954qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4955 uint32_t size)
4956{
4957 int rval;
4958 mbx_cmd_t mc;
4959 mbx_cmd_t *mcp = &mc;
4960
4961 if (!IS_MCTP_CAPABLE(vha->hw))
4962 return QLA_FUNCTION_FAILED;
4963
4964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
4965 "Entered %s.\n", __func__);
4966
4967 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4968 mcp->mb[1] = LSW(addr);
4969 mcp->mb[2] = MSW(req_dma);
4970 mcp->mb[3] = LSW(req_dma);
4971 mcp->mb[4] = MSW(size);
4972 mcp->mb[5] = LSW(size);
4973 mcp->mb[6] = MSW(MSD(req_dma));
4974 mcp->mb[7] = LSW(MSD(req_dma));
4975 mcp->mb[8] = MSW(addr);
 4976 /* Mark the RAM ID field as valid */
 4977 mcp->mb[10] |= BIT_7;
 4978 /* For MCTP, the RAM ID is 0x40 */
 4979 mcp->mb[10] |= 0x40;
4980
4981 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
4982 MBX_0;
4983
4984 mcp->in_mb = MBX_0;
4985 mcp->tov = MBX_TOV_SECONDS;
4986 mcp->flags = 0;
4987 rval = qla2x00_mailbox_command(vha, mcp);
4988
4989 if (rval != QLA_SUCCESS) {
4990 ql_dbg(ql_dbg_mbx, vha, 0x114e,
4991 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4992 } else {
4993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
4994 "Done %s.\n", __func__);
4995 }
4996
4997 return rval;
4998}
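
The mailbox setup above spreads the 64-bit DMA address across four 16-bit registers. A worked example, assuming the driver's usual LSW/MSW/MSD helper semantics (low/high 16 bits of a 32-bit value, high 32 bits of a 64-bit value) and a 64-bit dma_addr_t:

static void show_dma_split(void)
{
	dma_addr_t dma = 0x1122334455667788ULL;

	pr_info("mb2=%04x mb3=%04x mb6=%04x mb7=%04x\n",
	    MSW(dma),		/* 0x5566: bits 31..16 */
	    LSW(dma),		/* 0x7788: bits 15..0  */
	    MSW(MSD(dma)),	/* 0x1122: bits 63..48 */
	    LSW(MSD(dma)));	/* 0x3344: bits 47..32 */
}
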
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 3e8b32419e6..bd4708a422c 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -476,7 +476,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
476 476
477 vha->req = base_vha->req; 477 vha->req = base_vha->req;
478 host->can_queue = base_vha->req->length + 128; 478 host->can_queue = base_vha->req->length + 128;
479 host->this_id = 255;
480 host->cmd_per_lun = 3; 479 host->cmd_per_lun = 3;
481 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 480 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
482 host->max_cmd_len = 32; 481 host->max_cmd_len = 32;
@@ -643,7 +642,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
643 &req->dma, GFP_KERNEL); 642 &req->dma, GFP_KERNEL);
644 if (req->ring == NULL) { 643 if (req->ring == NULL) {
645 ql_log(ql_log_fatal, base_vha, 0x00da, 644 ql_log(ql_log_fatal, base_vha, 0x00da,
646 "Failed to allocte memory for request_ring.\n"); 645 "Failed to allocate memory for request_ring.\n");
647 goto que_failed; 646 goto que_failed;
648 } 647 }
649 648
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 7cfdf2bd8ed..14cd361742f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -1612,23 +1612,6 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
1612} 1612}
1613 1613
1614/* PCI related functions */ 1614/* PCI related functions */
1615char *
1616qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
1617{
1618 struct qla_hw_data *ha = vha->hw;
1619 char lwstr[6];
1620 uint16_t lnk;
1621
1622 pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
1623 ha->link_width = (lnk >> 4) & 0x3f;
1624
1625 strcpy(str, "PCIe (");
1626 strcat(str, "2.5Gb/s ");
1627 snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
1628 strcat(str, lwstr);
1629 return str;
1630}
1631
1632int qla82xx_pci_region_offset(struct pci_dev *pdev, int region) 1615int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
1633{ 1616{
1634 unsigned long val = 0; 1617 unsigned long val = 0;
@@ -2320,6 +2303,29 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
2320} 2303}
2321 2304
2322inline void 2305inline void
2306qla82xx_set_idc_version(scsi_qla_host_t *vha)
2307{
2308 int idc_ver;
2309 uint32_t drv_active;
2310 struct qla_hw_data *ha = vha->hw;
2311
2312 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2313 if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
2314 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
2315 QLA82XX_IDC_VERSION);
2316 ql_log(ql_log_info, vha, 0xb082,
2317 "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
2318 } else {
2319 idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
2320 if (idc_ver != QLA82XX_IDC_VERSION)
2321 ql_log(ql_log_info, vha, 0xb083,
2322 "qla2xxx driver IDC version %d is not compatible "
2323 "with IDC version %d of the other drivers\n",
2324 QLA82XX_IDC_VERSION, idc_ver);
2325 }
2326}
2327
2328inline void
2323qla82xx_set_drv_active(scsi_qla_host_t *vha) 2329qla82xx_set_drv_active(scsi_qla_host_t *vha)
2324{ 2330{
2325 uint32_t drv_active; 2331 uint32_t drv_active;
@@ -2353,7 +2359,7 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2353 uint32_t drv_state; 2359 uint32_t drv_state;
2354 int rval; 2360 int rval;
2355 2361
2356 if (ha->flags.isp82xx_reset_owner) 2362 if (ha->flags.nic_core_reset_owner)
2357 return 1; 2363 return 1;
2358 else { 2364 else {
2359 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2365 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
@@ -2860,7 +2866,7 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
2860 timeout = msleep_interruptible(200); 2866 timeout = msleep_interruptible(200);
2861 if (timeout) { 2867 if (timeout) {
2862 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2868 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2863 QLA82XX_DEV_FAILED); 2869 QLA8XXX_DEV_FAILED);
2864 return QLA_FUNCTION_FAILED; 2870 return QLA_FUNCTION_FAILED;
2865 } 2871 }
2866 2872
@@ -2891,10 +2897,7 @@ dev_initialize:
2891 /* set to DEV_INITIALIZING */ 2897 /* set to DEV_INITIALIZING */
2892 ql_log(ql_log_info, vha, 0x009e, 2898 ql_log(ql_log_info, vha, 0x009e,
2893 "HW State: INITIALIZING.\n"); 2899 "HW State: INITIALIZING.\n");
2894 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 2900 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
2895
2896 /* Driver that sets device state to initializating sets IDC version */
2897 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
2898 2901
2899 qla82xx_idc_unlock(ha); 2902 qla82xx_idc_unlock(ha);
2900 rval = qla82xx_start_firmware(vha); 2903 rval = qla82xx_start_firmware(vha);
@@ -2904,14 +2907,14 @@ dev_initialize:
2904 ql_log(ql_log_fatal, vha, 0x00ad, 2907 ql_log(ql_log_fatal, vha, 0x00ad,
2905 "HW State: FAILED.\n"); 2908 "HW State: FAILED.\n");
2906 qla82xx_clear_drv_active(ha); 2909 qla82xx_clear_drv_active(ha);
2907 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 2910 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
2908 return rval; 2911 return rval;
2909 } 2912 }
2910 2913
2911dev_ready: 2914dev_ready:
2912 ql_log(ql_log_info, vha, 0x00ae, 2915 ql_log(ql_log_info, vha, 0x00ae,
2913 "HW State: READY.\n"); 2916 "HW State: READY.\n");
2914 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 2917 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
2915 2918
2916 return QLA_SUCCESS; 2919 return QLA_SUCCESS;
2917} 2920}
@@ -2935,7 +2938,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2935 2938
2936 if (vha->flags.online) { 2939 if (vha->flags.online) {
2937 /*Block any further I/O and wait for pending cmnds to complete*/ 2940 /*Block any further I/O and wait for pending cmnds to complete*/
2938 qla82xx_quiescent_state_cleanup(vha); 2941 qla2x00_quiesce_io(vha);
2939 } 2942 }
2940 2943
2941 /* Set the quiescence ready bit */ 2944 /* Set the quiescence ready bit */
@@ -2960,7 +2963,7 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2960 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME, 2963 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
2961 drv_active, drv_state); 2964 drv_active, drv_state);
2962 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2965 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2963 QLA82XX_DEV_READY); 2966 QLA8XXX_DEV_READY);
2964 ql_log(ql_log_info, vha, 0xb025, 2967 ql_log(ql_log_info, vha, 0xb025,
2965 "HW State: DEV_READY.\n"); 2968 "HW State: DEV_READY.\n");
2966 qla82xx_idc_unlock(ha); 2969 qla82xx_idc_unlock(ha);
@@ -2981,10 +2984,10 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2981 } 2984 }
2982 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2985 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2983 /* everyone acked so set the state to DEV_QUIESCENCE */ 2986 /* everyone acked so set the state to DEV_QUIESCENCE */
2984 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { 2987 if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
2985 ql_log(ql_log_info, vha, 0xb026, 2988 ql_log(ql_log_info, vha, 0xb026,
2986 "HW State: DEV_QUIESCENT.\n"); 2989 "HW State: DEV_QUIESCENT.\n");
2987 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); 2990 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
2988 } 2991 }
2989} 2992}
2990 2993
@@ -3014,8 +3017,8 @@ qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3014 return dev_state; 3017 return dev_state;
3015} 3018}
3016 3019
3017static void 3020void
3018qla82xx_dev_failed_handler(scsi_qla_host_t *vha) 3021qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
3019{ 3022{
3020 struct qla_hw_data *ha = vha->hw; 3023 struct qla_hw_data *ha = vha->hw;
3021 3024
@@ -3023,9 +3026,10 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3023 ql_log(ql_log_fatal, vha, 0x00b8, 3026 ql_log(ql_log_fatal, vha, 0x00b8,
3024 "Disabling the board.\n"); 3027 "Disabling the board.\n");
3025 3028
3026 qla82xx_idc_lock(ha); 3029 if (IS_QLA82XX(ha)) {
3027 qla82xx_clear_drv_active(ha); 3030 qla82xx_clear_drv_active(ha);
3028 qla82xx_idc_unlock(ha); 3031 qla82xx_idc_unlock(ha);
3032 }
3029 3033
3030 /* Set DEV_FAILED flag to disable timer */ 3034 /* Set DEV_FAILED flag to disable timer */
3031 vha->device_flags |= DFLG_DEV_FAILED; 3035 vha->device_flags |= DFLG_DEV_FAILED;
@@ -3064,7 +3068,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3064 } 3068 }
3065 3069
3066 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3070 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3067 if (!ha->flags.isp82xx_reset_owner) { 3071 if (!ha->flags.nic_core_reset_owner) {
3068 ql_dbg(ql_dbg_p3p, vha, 0xb028, 3072 ql_dbg(ql_dbg_p3p, vha, 0xb028,
3069 "reset_acknowledged by 0x%x\n", ha->portnum); 3073 "reset_acknowledged by 0x%x\n", ha->portnum);
3070 qla82xx_set_rst_ready(ha); 3074 qla82xx_set_rst_ready(ha);
@@ -3076,7 +3080,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3076 } 3080 }
3077 3081
3078 /* wait for 10 seconds for reset ack from all functions */ 3082 /* wait for 10 seconds for reset ack from all functions */
3079 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 3083 reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
3080 3084
3081 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3085 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3082 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3086 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
@@ -3088,7 +3092,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3088 drv_state, drv_active, dev_state, active_mask); 3092 drv_state, drv_active, dev_state, active_mask);
3089 3093
3090 while (drv_state != drv_active && 3094 while (drv_state != drv_active &&
3091 dev_state != QLA82XX_DEV_INITIALIZING) { 3095 dev_state != QLA8XXX_DEV_INITIALIZING) {
3092 if (time_after_eq(jiffies, reset_timeout)) { 3096 if (time_after_eq(jiffies, reset_timeout)) {
3093 ql_log(ql_log_warn, vha, 0x00b5, 3097 ql_log(ql_log_warn, vha, 0x00b5,
3094 "Reset timeout.\n"); 3098 "Reset timeout.\n");
@@ -3099,7 +3103,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3099 qla82xx_idc_lock(ha); 3103 qla82xx_idc_lock(ha);
3100 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3104 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3101 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3105 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3102 if (ha->flags.isp82xx_reset_owner) 3106 if (ha->flags.nic_core_reset_owner)
3103 drv_active &= active_mask; 3107 drv_active &= active_mask;
3104 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3108 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3105 } 3109 }
@@ -3115,11 +3119,11 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3115 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); 3119 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3116 3120
3117 /* Force to DEV_COLD unless someone else is starting a reset */ 3121 /* Force to DEV_COLD unless someone else is starting a reset */
3118 if (dev_state != QLA82XX_DEV_INITIALIZING && 3122 if (dev_state != QLA8XXX_DEV_INITIALIZING &&
3119 dev_state != QLA82XX_DEV_COLD) { 3123 dev_state != QLA8XXX_DEV_COLD) {
3120 ql_log(ql_log_info, vha, 0x00b7, 3124 ql_log(ql_log_info, vha, 0x00b7,
3121 "HW State: COLD/RE-INIT.\n"); 3125 "HW State: COLD/RE-INIT.\n");
3122 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3126 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
3123 qla82xx_set_rst_ready(ha); 3127 qla82xx_set_rst_ready(ha);
3124 if (ql2xmdenable) { 3128 if (ql2xmdenable) {
3125 if (qla82xx_md_collect(vha)) 3129 if (qla82xx_md_collect(vha))
@@ -3226,8 +3230,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3226 int loopcount = 0; 3230 int loopcount = 0;
3227 3231
3228 qla82xx_idc_lock(ha); 3232 qla82xx_idc_lock(ha);
3229 if (!vha->flags.init_done) 3233 if (!vha->flags.init_done) {
3230 qla82xx_set_drv_active(vha); 3234 qla82xx_set_drv_active(vha);
3235 qla82xx_set_idc_version(vha);
3236 }
3231 3237
3232 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3238 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3233 old_dev_state = dev_state; 3239 old_dev_state = dev_state;
@@ -3237,7 +3243,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3237 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); 3243 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3238 3244
3239 /* wait for 30 seconds for device to go ready */ 3245 /* wait for 30 seconds for device to go ready */
3240 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3246 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
3241 3247
3242 while (1) { 3248 while (1) {
3243 3249
@@ -3261,18 +3267,18 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3261 } 3267 }
3262 3268
3263 switch (dev_state) { 3269 switch (dev_state) {
3264 case QLA82XX_DEV_READY: 3270 case QLA8XXX_DEV_READY:
3265 ha->flags.isp82xx_reset_owner = 0; 3271 ha->flags.nic_core_reset_owner = 0;
3266 goto exit; 3272 goto rel_lock;
3267 case QLA82XX_DEV_COLD: 3273 case QLA8XXX_DEV_COLD:
3268 rval = qla82xx_device_bootstrap(vha); 3274 rval = qla82xx_device_bootstrap(vha);
3269 break; 3275 break;
3270 case QLA82XX_DEV_INITIALIZING: 3276 case QLA8XXX_DEV_INITIALIZING:
3271 qla82xx_idc_unlock(ha); 3277 qla82xx_idc_unlock(ha);
3272 msleep(1000); 3278 msleep(1000);
3273 qla82xx_idc_lock(ha); 3279 qla82xx_idc_lock(ha);
3274 break; 3280 break;
3275 case QLA82XX_DEV_NEED_RESET: 3281 case QLA8XXX_DEV_NEED_RESET:
3276 if (!ql2xdontresethba) 3282 if (!ql2xdontresethba)
3277 qla82xx_need_reset_handler(vha); 3283 qla82xx_need_reset_handler(vha);
3278 else { 3284 else {
@@ -3281,31 +3287,31 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3281 qla82xx_idc_lock(ha); 3287 qla82xx_idc_lock(ha);
3282 } 3288 }
3283 dev_init_timeout = jiffies + 3289 dev_init_timeout = jiffies +
3284 (ha->nx_dev_init_timeout * HZ); 3290 (ha->fcoe_dev_init_timeout * HZ);
3285 break; 3291 break;
3286 case QLA82XX_DEV_NEED_QUIESCENT: 3292 case QLA8XXX_DEV_NEED_QUIESCENT:
3287 qla82xx_need_qsnt_handler(vha); 3293 qla82xx_need_qsnt_handler(vha);
3288 /* Reset timeout value after quiescence handler */ 3294 /* Reset timeout value after quiescence handler */
3289 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ 3295 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
3290 * HZ); 3296 * HZ);
3291 break; 3297 break;
3292 case QLA82XX_DEV_QUIESCENT: 3298 case QLA8XXX_DEV_QUIESCENT:
3293 /* Owner will exit and other will wait for the state 3299 /* Owner will exit and other will wait for the state
3294 * to get changed 3300 * to get changed
3295 */ 3301 */
3296 if (ha->flags.quiesce_owner) 3302 if (ha->flags.quiesce_owner)
3297 goto exit; 3303 goto rel_lock;
3298 3304
3299 qla82xx_idc_unlock(ha); 3305 qla82xx_idc_unlock(ha);
3300 msleep(1000); 3306 msleep(1000);
3301 qla82xx_idc_lock(ha); 3307 qla82xx_idc_lock(ha);
3302 3308
3303 /* Reset timeout value after quiescence handler */ 3309 /* Reset timeout value after quiescence handler */
3304 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ 3310 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
3305 * HZ); 3311 * HZ);
3306 break; 3312 break;
3307 case QLA82XX_DEV_FAILED: 3313 case QLA8XXX_DEV_FAILED:
3308 qla82xx_dev_failed_handler(vha); 3314 qla8xxx_dev_failed_handler(vha);
3309 rval = QLA_FUNCTION_FAILED; 3315 rval = QLA_FUNCTION_FAILED;
3310 goto exit; 3316 goto exit;
3311 default: 3317 default:
@@ -3315,8 +3321,9 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3315 } 3321 }
3316 loopcount++; 3322 loopcount++;
3317 } 3323 }
3318exit: 3324rel_lock:
3319 qla82xx_idc_unlock(ha); 3325 qla82xx_idc_unlock(ha);
3326exit:
3320 return rval; 3327 return rval;
3321} 3328}
3322 3329
@@ -3364,22 +3371,30 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3364 struct qla_hw_data *ha = vha->hw; 3371 struct qla_hw_data *ha = vha->hw;
3365 3372
3366 /* don't poll if reset is going on */ 3373 /* don't poll if reset is going on */
3367 if (!ha->flags.isp82xx_reset_hdlr_active) { 3374 if (!ha->flags.nic_core_reset_hdlr_active) {
3368 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3375 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3369 if (qla82xx_check_temp(vha)) { 3376 if (qla82xx_check_temp(vha)) {
3370 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); 3377 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3371 ha->flags.isp82xx_fw_hung = 1; 3378 ha->flags.isp82xx_fw_hung = 1;
3372 qla82xx_clear_pending_mbx(vha); 3379 qla82xx_clear_pending_mbx(vha);
3373 } else if (dev_state == QLA82XX_DEV_NEED_RESET && 3380 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
3374 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3381 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3375 ql_log(ql_log_warn, vha, 0x6001, 3382 ql_log(ql_log_warn, vha, 0x6001,
3376 "Adapter reset needed.\n"); 3383 "Adapter reset needed.\n");
3377 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3384 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3378 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3385 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
3379 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3386 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3380 ql_log(ql_log_warn, vha, 0x6002, 3387 ql_log(ql_log_warn, vha, 0x6002,
3381 "Quiescent needed.\n"); 3388 "Quiescent needed.\n");
3382 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3389 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3390 } else if (dev_state == QLA8XXX_DEV_FAILED &&
3391 !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
3392 vha->flags.online == 1) {
3393 ql_log(ql_log_warn, vha, 0xb055,
3394 "Adapter state is failed. Offlining.\n");
3395 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3396 ha->flags.isp82xx_fw_hung = 1;
3397 qla82xx_clear_pending_mbx(vha);
3383 } else { 3398 } else {
3384 if (qla82xx_check_fw_alive(vha)) { 3399 if (qla82xx_check_fw_alive(vha)) {
3385 ql_dbg(ql_dbg_timer, vha, 0x6011, 3400 ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3441,12 +3456,12 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3441 uint32_t dev_state; 3456 uint32_t dev_state;
3442 3457
3443 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3458 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3444 if (dev_state == QLA82XX_DEV_READY) { 3459 if (dev_state == QLA8XXX_DEV_READY) {
3445 ql_log(ql_log_info, vha, 0xb02f, 3460 ql_log(ql_log_info, vha, 0xb02f,
3446 "HW State: NEED RESET\n"); 3461 "HW State: NEED RESET\n");
3447 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3462 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3448 QLA82XX_DEV_NEED_RESET); 3463 QLA8XXX_DEV_NEED_RESET);
3449 ha->flags.isp82xx_reset_owner = 1; 3464 ha->flags.nic_core_reset_owner = 1;
3450 ql_dbg(ql_dbg_p3p, vha, 0xb030, 3465 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3451 "reset_owner is 0x%x\n", ha->portnum); 3466 "reset_owner is 0x%x\n", ha->portnum);
3452 } else 3467 } else
@@ -3477,7 +3492,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3477 "Device in failed state, exiting.\n"); 3492 "Device in failed state, exiting.\n");
3478 return QLA_SUCCESS; 3493 return QLA_SUCCESS;
3479 } 3494 }
3480 ha->flags.isp82xx_reset_hdlr_active = 1; 3495 ha->flags.nic_core_reset_hdlr_active = 1;
3481 3496
3482 qla82xx_idc_lock(ha); 3497 qla82xx_idc_lock(ha);
3483 qla82xx_set_reset_owner(vha); 3498 qla82xx_set_reset_owner(vha);
@@ -3491,7 +3506,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3491 3506
3492 if (rval == QLA_SUCCESS) { 3507 if (rval == QLA_SUCCESS) {
3493 ha->flags.isp82xx_fw_hung = 0; 3508 ha->flags.isp82xx_fw_hung = 0;
3494 ha->flags.isp82xx_reset_hdlr_active = 0; 3509 ha->flags.nic_core_reset_hdlr_active = 0;
3495 qla82xx_restart_isp(vha); 3510 qla82xx_restart_isp(vha);
3496 } 3511 }
3497 3512
@@ -4026,7 +4041,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4026 4041
4027 if (r_addr & 0xf) { 4042 if (r_addr & 0xf) {
4028 ql_log(ql_log_warn, vha, 0xb033, 4043 ql_log(ql_log_warn, vha, 0xb033,
4029 "Read addr 0x%x not 16 bytes alligned\n", r_addr); 4044 "Read addr 0x%x not 16 bytes aligned\n", r_addr);
4030 return rval; 4045 return rval;
4031 } 4046 }
4032 4047
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6eb210e3cc6..6c953e8c08f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -542,14 +542,15 @@
542#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174)) 542#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
543 543
544/* Every driver should use these Device State */ 544/* Every driver should use these Device State */
545#define QLA82XX_DEV_COLD 1 545#define QLA8XXX_DEV_COLD 1
546#define QLA82XX_DEV_INITIALIZING 2 546#define QLA8XXX_DEV_INITIALIZING 2
547#define QLA82XX_DEV_READY 3 547#define QLA8XXX_DEV_READY 3
548#define QLA82XX_DEV_NEED_RESET 4 548#define QLA8XXX_DEV_NEED_RESET 4
549#define QLA82XX_DEV_NEED_QUIESCENT 5 549#define QLA8XXX_DEV_NEED_QUIESCENT 5
550#define QLA82XX_DEV_FAILED 6 550#define QLA8XXX_DEV_FAILED 6
551#define QLA82XX_DEV_QUIESCENT 7 551#define QLA8XXX_DEV_QUIESCENT 7
552#define MAX_STATES 8 /* Increment if new state added */ 552#define MAX_STATES 8 /* Increment if new state added */
553#define QLA8XXX_BAD_VALUE 0xbad0bad0
553 554
554#define QLA82XX_IDC_VERSION 1 555#define QLA82XX_IDC_VERSION 1
555#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30 556#define QLA82XX_ROM_DEV_INIT_TIMEOUT 30
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d3052622e77..d501bf5f806 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic Fibre Channel HBA Driver 2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2011 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
@@ -113,11 +113,11 @@ MODULE_PARM_DESC(ql2xfdmienable,
113static int ql2xmaxqdepth = MAX_Q_DEPTH; 113static int ql2xmaxqdepth = MAX_Q_DEPTH;
114module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); 114module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
115MODULE_PARM_DESC(ql2xmaxqdepth, 115MODULE_PARM_DESC(ql2xmaxqdepth,
116 "Maximum queue depth to report for target devices."); 116 "Maximum queue depth to set for each LUN. "
117 "Default is 32.");
117 118
118/* Do not change the value of this after module load */ 119int ql2xenabledif = 2;
119int ql2xenabledif = 0; 120module_param(ql2xenabledif, int, S_IRUGO);
120module_param(ql2xenabledif, int, S_IRUGO|S_IWUSR);
121MODULE_PARM_DESC(ql2xenabledif, 121MODULE_PARM_DESC(ql2xenabledif,
122 " Enable T10-CRC-DIF " 122 " Enable T10-CRC-DIF "
123 " Default is 0 - No DIF Support. 1 - Enable it" 123 " Default is 0 - No DIF Support. 1 - Enable it"
@@ -1078,7 +1078,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1078 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, 1078 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
1079 cmd->device->lun, type) != QLA_SUCCESS) { 1079 cmd->device->lun, type) != QLA_SUCCESS) {
1080 ql_log(ql_log_warn, vha, 0x800d, 1080 ql_log(ql_log_warn, vha, 0x800d,
1081 "wait for peding cmds failed for cmd=%p.\n", cmd); 1081 "wait for pending cmds failed for cmd=%p.\n", cmd);
1082 goto eh_reset_failed; 1082 goto eh_reset_failed;
1083 } 1083 }
1084 1084
@@ -1177,7 +1177,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1177eh_bus_reset_done: 1177eh_bus_reset_done:
1178 ql_log(ql_log_warn, vha, 0x802b, 1178 ql_log(ql_log_warn, vha, 0x802b,
1179 "BUS RESET %s nexus=%ld:%d:%d.\n", 1179 "BUS RESET %s nexus=%ld:%d:%d.\n",
1180 (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun); 1180 (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1181 1181
1182 return ret; 1182 return ret;
1183} 1183}
@@ -1357,6 +1357,9 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1357 scsi_qla_host_t *vha = shost_priv(sdev->host); 1357 scsi_qla_host_t *vha = shost_priv(sdev->host);
1358 struct req_que *req = vha->req; 1358 struct req_que *req = vha->req;
1359 1359
1360 if (IS_T10_PI_CAPABLE(vha->hw))
1361 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1362
1360 if (sdev->tagged_supported) 1363 if (sdev->tagged_supported)
1361 scsi_activate_tcq(sdev, req->max_q_depth); 1364 scsi_activate_tcq(sdev, req->max_q_depth);
1362 else 1365 else
@@ -1919,7 +1922,7 @@ static struct isp_operations qla82xx_isp_ops = {
1919 .nvram_config = qla81xx_nvram_config, 1922 .nvram_config = qla81xx_nvram_config,
1920 .update_fw_options = qla24xx_update_fw_options, 1923 .update_fw_options = qla24xx_update_fw_options,
1921 .load_risc = qla82xx_load_risc, 1924 .load_risc = qla82xx_load_risc,
1922 .pci_info_str = qla82xx_pci_info_str, 1925 .pci_info_str = qla24xx_pci_info_str,
1923 .fw_version_str = qla24xx_fw_version_str, 1926 .fw_version_str = qla24xx_fw_version_str,
1924 .intr_handler = qla82xx_intr_handler, 1927 .intr_handler = qla82xx_intr_handler,
1925 .enable_intrs = qla82xx_enable_intrs, 1928 .enable_intrs = qla82xx_enable_intrs,
@@ -2149,7 +2152,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2149 scsi_qla_host_t *base_vha = NULL; 2152 scsi_qla_host_t *base_vha = NULL;
2150 struct qla_hw_data *ha; 2153 struct qla_hw_data *ha;
2151 char pci_info[30]; 2154 char pci_info[30];
2152 char fw_str[30]; 2155 char fw_str[30], wq_name[30];
2153 struct scsi_host_template *sht; 2156 struct scsi_host_template *sht;
2154 int bars, mem_only = 0; 2157 int bars, mem_only = 0;
2155 uint16_t req_length = 0, rsp_length = 0; 2158 uint16_t req_length = 0, rsp_length = 0;
@@ -2203,12 +2206,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2203 ha->mem_only = mem_only; 2206 ha->mem_only = mem_only;
2204 spin_lock_init(&ha->hardware_lock); 2207 spin_lock_init(&ha->hardware_lock);
2205 spin_lock_init(&ha->vport_slock); 2208 spin_lock_init(&ha->vport_slock);
2209 mutex_init(&ha->selflogin_lock);
2206 2210
2207 /* Set ISP-type information. */ 2211 /* Set ISP-type information. */
2208 qla2x00_set_isp_flags(ha); 2212 qla2x00_set_isp_flags(ha);
2209 2213
2210 /* Set EEH reset type to fundamental if required by hba */ 2214 /* Set EEH reset type to fundamental if required by hba */
2211 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2215 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2216 IS_QLA83XX(ha))
2212 pdev->needs_freset = 1; 2217 pdev->needs_freset = 1;
2213 2218
2214 ha->prev_topology = 0; 2219 ha->prev_topology = 0;
@@ -2318,6 +2323,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2318 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2323 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2319 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2324 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2320 } else if (IS_QLA83XX(ha)) { 2325 } else if (IS_QLA83XX(ha)) {
2326 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2321 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; 2327 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2322 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2328 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2323 req_length = REQUEST_ENTRY_CNT_24XX; 2329 req_length = REQUEST_ENTRY_CNT_24XX;
@@ -2416,7 +2422,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2416 host->can_queue, base_vha->req, 2422 host->can_queue, base_vha->req,
2417 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 2423 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2418 host->max_id = ha->max_fibre_devices; 2424 host->max_id = ha->max_fibre_devices;
2419 host->this_id = 255;
2420 host->cmd_per_lun = 3; 2425 host->cmd_per_lun = 3;
2421 host->unique_id = host->host_no; 2426 host->unique_id = host->host_no;
2422 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 2427 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
@@ -2499,7 +2504,7 @@ que_init:
2499 if (IS_QLA82XX(ha)) { 2504 if (IS_QLA82XX(ha)) {
2500 qla82xx_idc_lock(ha); 2505 qla82xx_idc_lock(ha);
2501 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2506 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2502 QLA82XX_DEV_FAILED); 2507 QLA8XXX_DEV_FAILED);
2503 qla82xx_idc_unlock(ha); 2508 qla82xx_idc_unlock(ha);
2504 ql_log(ql_log_fatal, base_vha, 0x00d7, 2509 ql_log(ql_log_fatal, base_vha, 0x00d7,
2505 "HW State: FAILED.\n"); 2510 "HW State: FAILED.\n");
@@ -2542,6 +2547,20 @@ que_init:
2542 */ 2547 */
2543 qla2xxx_wake_dpc(base_vha); 2548 qla2xxx_wake_dpc(base_vha);
2544 2549
2550 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
2551 sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
2552 ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
2553 INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
2554
2555 sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
2556 ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
2557 INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
2558 INIT_WORK(&ha->idc_state_handler,
2559 qla83xx_idc_state_handler_work);
2560 INIT_WORK(&ha->nic_core_unrecoverable,
2561 qla83xx_nic_core_unrecoverable_work);
2562 }
2563
2545skip_dpc: 2564skip_dpc:
2546 list_add_tail(&base_vha->list, &ha->vp_list); 2565 list_add_tail(&base_vha->list, &ha->vp_list);
2547 base_vha->host->irq = ha->pdev->irq; 2566 base_vha->host->irq = ha->pdev->irq;
@@ -2557,7 +2576,7 @@ skip_dpc:
2557 2576
2558 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { 2577 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2559 if (ha->fw_attributes & BIT_4) { 2578 if (ha->fw_attributes & BIT_4) {
2560 int prot = 0; 2579 int prot = 0, guard;
2561 base_vha->flags.difdix_supported = 1; 2580 base_vha->flags.difdix_supported = 1;
2562 ql_dbg(ql_dbg_init, base_vha, 0x00f1, 2581 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
2563 "Registering for DIF/DIX type 1 and 3 protection.\n"); 2582 "Registering for DIF/DIX type 1 and 3 protection.\n");
@@ -2570,7 +2589,14 @@ skip_dpc:
2570 | SHOST_DIX_TYPE1_PROTECTION 2589 | SHOST_DIX_TYPE1_PROTECTION
2571 | SHOST_DIX_TYPE2_PROTECTION 2590 | SHOST_DIX_TYPE2_PROTECTION
2572 | SHOST_DIX_TYPE3_PROTECTION); 2591 | SHOST_DIX_TYPE3_PROTECTION);
2573 scsi_host_set_guard(host, SHOST_DIX_GUARD_CRC); 2592
2593 guard = SHOST_DIX_GUARD_CRC;
2594
2595 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2596 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2597 guard |= SHOST_DIX_GUARD_IP;
2598
2599 scsi_host_set_guard(host, guard);
2574 } else 2600 } else
2575 base_vha->flags.difdix_supported = 0; 2601 base_vha->flags.difdix_supported = 0;
2576 } 2602 }
@@ -2750,6 +2776,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
2750 } 2776 }
2751 mutex_unlock(&ha->vport_lock); 2777 mutex_unlock(&ha->vport_lock);
2752 2778
2779 if (IS_QLA8031(ha)) {
2780 ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
2781 "Clearing fcoe driver presence.\n");
2782 if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
2783 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
2784 "Error while clearing DRV-Presence.\n");
2785 }
2786
2753 set_bit(UNLOADING, &base_vha->dpc_flags); 2787 set_bit(UNLOADING, &base_vha->dpc_flags);
2754 2788
2755 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); 2789 qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
@@ -2771,6 +2805,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
2771 ha->wq = NULL; 2805 ha->wq = NULL;
2772 } 2806 }
2773 2807
2808 /* Cancel all work and destroy DPC workqueues */
2809 if (ha->dpc_lp_wq) {
2810 cancel_work_sync(&ha->idc_aen);
2811 destroy_workqueue(ha->dpc_lp_wq);
2812 ha->dpc_lp_wq = NULL;
2813 }
2814
2815 if (ha->dpc_hp_wq) {
2816 cancel_work_sync(&ha->nic_core_reset);
2817 cancel_work_sync(&ha->idc_state_handler);
2818 cancel_work_sync(&ha->nic_core_unrecoverable);
2819 destroy_workqueue(ha->dpc_hp_wq);
2820 ha->dpc_hp_wq = NULL;
2821 }
2822
2774 /* Kill the kernel thread for this host */ 2823 /* Kill the kernel thread for this host */
2775 if (ha->dpc_thread) { 2824 if (ha->dpc_thread) {
2776 struct task_struct *t = ha->dpc_thread; 2825 struct task_struct *t = ha->dpc_thread;
@@ -2837,7 +2886,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2837 qla2x00_stop_dpc_thread(vha); 2886 qla2x00_stop_dpc_thread(vha);
2838 2887
2839 qla25xx_delete_queues(vha); 2888 qla25xx_delete_queues(vha);
2840
2841 if (ha->flags.fce_enabled) 2889 if (ha->flags.fce_enabled)
2842 qla2x00_disable_fce_trace(vha, NULL, NULL); 2890 qla2x00_disable_fce_trace(vha, NULL, NULL);
2843 2891
@@ -2872,6 +2920,7 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
2872 2920
2873 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) { 2921 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
2874 list_del(&fcport->list); 2922 list_del(&fcport->list);
2923 qla2x00_clear_loop_id(fcport);
2875 kfree(fcport); 2924 kfree(fcport);
2876 fcport = NULL; 2925 fcport = NULL;
2877 } 2926 }
@@ -3169,6 +3218,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3169 } 3218 }
3170 3219
3171 INIT_LIST_HEAD(&ha->vp_list); 3220 INIT_LIST_HEAD(&ha->vp_list);
3221
3222 /* Allocate memory for our loop_id bitmap */
3223 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3224 GFP_KERNEL);
3225 if (!ha->loop_id_map)
3226 goto fail_async_pd;
3227 else {
3228 qla2x00_set_reserved_loop_ids(ha);
3229 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3230 "loop_id_map=%p. \n", ha->loop_id_map);
3231 }
3232
3172 return 1; 3233 return 1;
3173 3234
3174fail_async_pd: 3235fail_async_pd:
@@ -3280,6 +3341,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3280{ 3341{
3281 qla2x00_free_fw_dump(ha); 3342 qla2x00_free_fw_dump(ha);
3282 3343
3344 if (ha->mctp_dump)
3345 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
3346 ha->mctp_dump_dma);
3347
3283 if (ha->srb_mempool) 3348 if (ha->srb_mempool)
3284 mempool_destroy(ha->srb_mempool); 3349 mempool_destroy(ha->srb_mempool);
3285 3350
@@ -3352,6 +3417,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3352 kfree(ha->nvram); 3417 kfree(ha->nvram);
3353 kfree(ha->npiv_info); 3418 kfree(ha->npiv_info);
3354 kfree(ha->swl); 3419 kfree(ha->swl);
3420 kfree(ha->loop_id_map);
3355 3421
3356 ha->srb_mempool = NULL; 3422 ha->srb_mempool = NULL;
3357 ha->ctx_mempool = NULL; 3423 ha->ctx_mempool = NULL;
@@ -3687,13 +3753,651 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
3687 } 3753 }
3688 3754
3689 if (fcport->login_retry == 0 && status != QLA_SUCCESS) 3755 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
3690 fcport->loop_id = FC_NO_LOOP_ID; 3756 qla2x00_clear_loop_id(fcport);
3691 } 3757 }
3692 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3758 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3693 break; 3759 break;
3694 } 3760 }
3695} 3761}
3696 3762
3763/* Schedule work on any of the dpc-workqueues */
3764void
3765qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
3766{
3767 struct qla_hw_data *ha = base_vha->hw;
3768
3769 switch (work_code) {
3770 case MBA_IDC_AEN: /* 0x8200 */
3771 if (ha->dpc_lp_wq)
3772 queue_work(ha->dpc_lp_wq, &ha->idc_aen);
3773 break;
3774
3775 case QLA83XX_NIC_CORE_RESET: /* 0x1 */
3776 if (!ha->flags.nic_core_reset_hdlr_active) {
3777 if (ha->dpc_hp_wq)
3778 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
3779 } else
3780 ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
3781 "NIC Core reset is already active. Skip "
3782 "scheduling it again.\n");
3783 break;
3784 case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
3785 if (ha->dpc_hp_wq)
3786 queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
3787 break;
3788 case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
3789 if (ha->dpc_hp_wq)
3790 queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
3791 break;
3792 default:
3793 ql_log(ql_log_warn, base_vha, 0xb05f,
3794 "Unknow work-code=0x%x.\n", work_code);
3795 }
3796
3797 return;
3798}
3799
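For orientation, the lifecycle of this work-dispatch plumbing as wired up elsewhere in this patch, condensed into one place (not a literal copy of any single hunk):

	/* probe: create the queue and bind the handler */
	ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
	INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);

	/* AEN path: defer servicing to process context */
	qla83xx_schedule_work(base_vha, MBA_IDC_AEN);

	/* remove: drain in-flight work before tearing down */
	cancel_work_sync(&ha->idc_aen);
	destroy_workqueue(ha->dpc_lp_wq);
	ha->dpc_lp_wq = NULL;
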
3800/* Work: Perform NIC Core Unrecoverable state handling */
3801void
3802qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
3803{
3804 struct qla_hw_data *ha =
3805 container_of(work, struct qla_hw_data, nic_core_unrecoverable);
3806 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3807 uint32_t dev_state = 0;
3808
3809 qla83xx_idc_lock(base_vha, 0);
3810 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3811 qla83xx_reset_ownership(base_vha);
3812 if (ha->flags.nic_core_reset_owner) {
3813 ha->flags.nic_core_reset_owner = 0;
3814 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
3815 QLA8XXX_DEV_FAILED);
3816 ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
3817 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
3818 }
3819 qla83xx_idc_unlock(base_vha, 0);
3820}
3821
3822/* Work: Execute IDC state handler */
3823void
3824qla83xx_idc_state_handler_work(struct work_struct *work)
3825{
3826 struct qla_hw_data *ha =
3827 container_of(work, struct qla_hw_data, idc_state_handler);
3828 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3829 uint32_t dev_state = 0;
3830
3831 qla83xx_idc_lock(base_vha, 0);
3832 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3833 if (dev_state == QLA8XXX_DEV_FAILED ||
3834 dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
3835 qla83xx_idc_state_handler(base_vha);
3836 qla83xx_idc_unlock(base_vha, 0);
3837}
3838
3839int
3840qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
3841{
3842 int rval = QLA_SUCCESS;
3843 unsigned long heart_beat_wait = jiffies + (1 * HZ);
3844 uint32_t heart_beat_counter1, heart_beat_counter2;
3845
3846 do {
3847 if (time_after(jiffies, heart_beat_wait)) {
3848 ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
3849 "Nic Core f/w is not alive.\n");
3850 rval = QLA_FUNCTION_FAILED;
3851 break;
3852 }
3853
3854 qla83xx_idc_lock(base_vha, 0);
3855 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
3856 &heart_beat_counter1);
3857 qla83xx_idc_unlock(base_vha, 0);
3858 msleep(100);
3859 qla83xx_idc_lock(base_vha, 0);
3860 qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
3861 &heart_beat_counter2);
3862 qla83xx_idc_unlock(base_vha, 0);
3863 } while (heart_beat_counter1 == heart_beat_counter2);
3864
3865 return rval;
3866}
3867
3868/* Work: Perform NIC Core Reset handling */
3869void
3870qla83xx_nic_core_reset_work(struct work_struct *work)
3871{
3872 struct qla_hw_data *ha =
3873 container_of(work, struct qla_hw_data, nic_core_reset);
3874 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3875 uint32_t dev_state = 0;
3876
3877 if (IS_QLA2031(ha)) {
3878 if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
3879 ql_log(ql_log_warn, base_vha, 0xb081,
3880 "Failed to dump mctp\n");
3881 return;
3882 }
3883
3884 if (!ha->flags.nic_core_reset_hdlr_active) {
3885 if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
3886 qla83xx_idc_lock(base_vha, 0);
3887 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
3888 &dev_state);
3889 qla83xx_idc_unlock(base_vha, 0);
3890 if (dev_state != QLA8XXX_DEV_NEED_RESET) {
3891 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
3892 "Nic Core f/w is alive.\n");
3893 return;
3894 }
3895 }
3896
3897 ha->flags.nic_core_reset_hdlr_active = 1;
3898 if (qla83xx_nic_core_reset(base_vha)) {
3899 /* NIC Core reset failed. */
3900 ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
3901 "NIC Core reset failed.\n");
3902 }
3903 ha->flags.nic_core_reset_hdlr_active = 0;
3904 }
3905}
3906
3907/* Work: Handle 8200 IDC aens */
3908void
3909qla83xx_service_idc_aen(struct work_struct *work)
3910{
3911 struct qla_hw_data *ha =
3912 container_of(work, struct qla_hw_data, idc_aen);
3913 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3914 uint32_t dev_state, idc_control;
3915
3916 qla83xx_idc_lock(base_vha, 0);
3917 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
3918 qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
3919 qla83xx_idc_unlock(base_vha, 0);
3920 if (dev_state == QLA8XXX_DEV_NEED_RESET) {
3921 if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
3922 ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
3923 "Application requested NIC Core Reset.\n");
3924 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
3925 } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
3926 QLA_SUCCESS) {
3927 ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
3928 "Other protocol driver requested NIC Core Reset.\n");
3929 qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
3930 }
3931 } else if (dev_state == QLA8XXX_DEV_FAILED ||
3932 dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
3933 qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
3934 }
3935}
3936
3937static void
3938qla83xx_wait_logic(void)
3939{
3940 int i;
3941
3942 /* Yield CPU */
3943 if (!in_interrupt()) {
3944 /*
3945 * Wait about 200ms before retrying again.
3946 * This controls the number of retries for single
3947 * lock operation.
3948 */
3949 msleep(100);
3950 schedule();
3951 } else {
3952 for (i = 0; i < 20; i++)
 3953 cpu_relax(); /* This is a nop instr on i386 */
3954 }
3955}
3956
3957int
3958qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
3959{
3960 int rval;
3961 uint32_t data;
3962 uint32_t idc_lck_rcvry_stage_mask = 0x3;
3963 uint32_t idc_lck_rcvry_owner_mask = 0x3c;
3964 struct qla_hw_data *ha = base_vha->hw;
3965
3966 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
3967 if (rval)
3968 return rval;
3969
3970 if ((data & idc_lck_rcvry_stage_mask) > 0) {
3971 return QLA_SUCCESS;
3972 } else {
3973 data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
3974 rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
3975 data);
3976 if (rval)
3977 return rval;
3978
3979 msleep(200);
3980
3981 rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
3982 &data);
3983 if (rval)
3984 return rval;
3985
3986 if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
3987 data &= (IDC_LOCK_RECOVERY_STAGE2 |
3988 ~(idc_lck_rcvry_stage_mask));
3989 rval = qla83xx_wr_reg(base_vha,
3990 QLA83XX_IDC_LOCK_RECOVERY, data);
3991 if (rval)
3992 return rval;
3993
3994 /* Forcefully perform IDC UnLock */
3995 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
3996 &data);
3997 if (rval)
3998 return rval;
3999 /* Clear lock-id by setting 0xff */
4000 rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4001 0xff);
4002 if (rval)
4003 return rval;
4004 /* Clear lock-recovery by setting 0x0 */
4005 rval = qla83xx_wr_reg(base_vha,
4006 QLA83XX_IDC_LOCK_RECOVERY, 0x0);
4007 if (rval)
4008 return rval;
4009 } else
4010 return QLA_SUCCESS;
4011 }
4012
4013 return rval;
4014}
4015
4016int
4017qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
4018{
4019 int rval = QLA_SUCCESS;
4020 uint32_t o_drv_lockid, n_drv_lockid;
4021 unsigned long lock_recovery_timeout;
4022
4023 lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
4024retry_lockid:
4025 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
4026 if (rval)
4027 goto exit;
4028
4029 /* MAX wait time before forcing IDC Lock recovery = 2 secs */
4030 if (time_after_eq(jiffies, lock_recovery_timeout)) {
4031 if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
4032 return QLA_SUCCESS;
4033 else
4034 return QLA_FUNCTION_FAILED;
4035 }
4036
4037 rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
4038 if (rval)
4039 goto exit;
4040
4041 if (o_drv_lockid == n_drv_lockid) {
4042 qla83xx_wait_logic();
4043 goto retry_lockid;
4044 } else
4045 return QLA_SUCCESS;
4046
4047exit:
4048 return rval;
4049}
4050
4051void
4052qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4053{
4054 uint16_t options = (requester_id << 15) | BIT_6;
4055 uint32_t data;
4056 struct qla_hw_data *ha = base_vha->hw;
4057
4058 /* IDC-lock implementation using driver-lock/lock-id remote registers */
4059retry_lock:
4060 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
4061 == QLA_SUCCESS) {
4062 if (data) {
4063 /* Setting lock-id to our function-number */
4064 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
4065 ha->portnum);
4066 } else {
4067 ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
4068 "Failed to acquire IDC lock. retrying...\n");
4069
4070 /* Retry/Perform IDC-Lock recovery */
4071 if (qla83xx_idc_lock_recovery(base_vha)
4072 == QLA_SUCCESS) {
4073 qla83xx_wait_logic();
4074 goto retry_lock;
4075 } else
4076 ql_log(ql_log_warn, base_vha, 0xb075,
4077 "IDC Lock recovery FAILED.\n");
4078 }
4079
4080 }
4081
4082 return;
4083
4084 /* XXX: IDC-lock implementation using access-control mbx */
4085retry_lock2:
4086 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4087 ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
4088 "Failed to acquire IDC lock. retrying...\n");
4089 /* Retry/Perform IDC-Lock recovery */
4090 if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
4091 qla83xx_wait_logic();
4092 goto retry_lock2;
4093 } else
4094 ql_log(ql_log_warn, base_vha, 0xb076,
4095 "IDC Lock recovery FAILED.\n");
4096 }
4097
4098 return;
4099}
4100
4101void
4102qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
4103{
4104 uint16_t options = (requester_id << 15) | BIT_7, retry;
4105 uint32_t data;
4106 struct qla_hw_data *ha = base_vha->hw;
4107
4108 /* IDC-unlock implementation using driver-unlock/lock-id
4109 * remote registers
4110 */
4111 retry = 0;
4112retry_unlock:
4113 if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
4114 == QLA_SUCCESS) {
4115 if (data == ha->portnum) {
4116 qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
4117 /* Clearing lock-id by setting 0xff */
4118 qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
4119 } else if (retry < 10) {
4120 /* SV: XXX: IDC unlock retrying needed here? */
4121
4122 /* Retry for IDC-unlock */
4123 qla83xx_wait_logic();
4124 retry++;
4125 ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
4126 "Failed to release IDC lock, retyring=%d\n", retry);
4127 goto retry_unlock;
4128 }
4129 } else if (retry < 10) {
4130 /* Retry for IDC-unlock */
4131 qla83xx_wait_logic();
4132 retry++;
4133 ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
4134 "Failed to read drv-lockid, retyring=%d\n", retry);
4135 goto retry_unlock;
4136 }
4137
4138 return;
4139
4140 /* XXX: IDC-unlock implementation using access-control mbx */
4141 retry = 0;
4142retry_unlock2:
4143 if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
4144 if (retry < 10) {
4145 /* Retry for IDC-unlock */
4146 qla83xx_wait_logic();
4147 retry++;
4148 ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
4149 "Failed to release IDC lock, retyring=%d\n", retry);
4150 goto retry_unlock2;
4151 }
4152 }
4153
4154 return;
4155}
4156
4157int
4158__qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4159{
4160 int rval = QLA_SUCCESS;
4161 struct qla_hw_data *ha = vha->hw;
4162 uint32_t drv_presence;
4163
4164 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4165 if (rval == QLA_SUCCESS) {
4166 drv_presence |= (1 << ha->portnum);
4167 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4168 drv_presence);
4169 }
4170
4171 return rval;
4172}
4173
4174int
4175qla83xx_set_drv_presence(scsi_qla_host_t *vha)
4176{
4177 int rval = QLA_SUCCESS;
4178
4179 qla83xx_idc_lock(vha, 0);
4180 rval = __qla83xx_set_drv_presence(vha);
4181 qla83xx_idc_unlock(vha, 0);
4182
4183 return rval;
4184}
4185
4186int
4187__qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4188{
4189 int rval = QLA_SUCCESS;
4190 struct qla_hw_data *ha = vha->hw;
4191 uint32_t drv_presence;
4192
4193 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4194 if (rval == QLA_SUCCESS) {
4195 drv_presence &= ~(1 << ha->portnum);
4196 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4197 drv_presence);
4198 }
4199
4200 return rval;
4201}
4202
4203int
4204qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
4205{
4206 int rval = QLA_SUCCESS;
4207
4208 qla83xx_idc_lock(vha, 0);
4209 rval = __qla83xx_clear_drv_presence(vha);
4210 qla83xx_idc_unlock(vha, 0);
4211
4212 return rval;
4213}
4214
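These helpers are presumably paired over the driver lifetime: the qla2x00_remove_one() hunk above already calls qla83xx_clear_drv_presence(), and a probe-side counterpart would look roughly like this (sketch only; the 0xb0ff message id is made up):

	/* Once the IDC registers are reachable, announce this function. */
	if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0xb0ff,	/* hypothetical id */
		    "Error while setting DRV-Presence.\n");
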
4215void
4216qla83xx_need_reset_handler(scsi_qla_host_t *vha)
4217{
4218 struct qla_hw_data *ha = vha->hw;
4219 uint32_t drv_ack, drv_presence;
4220 unsigned long ack_timeout;
4221
4222 /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
4223 ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
4224 while (1) {
4225 qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
4226 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
4227 if (drv_ack == drv_presence)
4228 break;
4229
4230 if (time_after_eq(jiffies, ack_timeout)) {
4231 ql_log(ql_log_warn, vha, 0xb067,
4232 "RESET ACK TIMEOUT! drv_presence=0x%x "
4233 "drv_ack=0x%x\n", drv_presence, drv_ack);
4234 /*
4235 * The function(s) which did not ack in time are forced
4236 * to withdraw any further participation in the IDC
4237 * reset.
4238 */
4239 if (drv_ack != drv_presence)
4240 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
4241 drv_ack);
4242 break;
4243 }
4244
4245 qla83xx_idc_unlock(vha, 0);
4246 msleep(1000);
4247 qla83xx_idc_lock(vha, 0);
4248 }
4249
4250 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
4251 ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
4252}
4253
4254int
4255qla83xx_device_bootstrap(scsi_qla_host_t *vha)
4256{
4257 int rval = QLA_SUCCESS;
4258 uint32_t idc_control;
4259
4260 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
4261 ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
4262
4263 /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
4264 __qla83xx_get_idc_control(vha, &idc_control);
4265 idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
4266 __qla83xx_set_idc_control(vha, 0);
4267
4268 qla83xx_idc_unlock(vha, 0);
4269 rval = qla83xx_restart_nic_firmware(vha);
4270 qla83xx_idc_lock(vha, 0);
4271
4272 if (rval != QLA_SUCCESS) {
4273 ql_log(ql_log_fatal, vha, 0xb06a,
4274 "Failed to restart NIC f/w.\n");
4275 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
4276 ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
4277 } else {
4278 ql_dbg(ql_dbg_p3p, vha, 0xb06c,
4279 "Success in restarting nic f/w.\n");
4280 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
4281 ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
4282 }
4283
4284 return rval;
4285}
4286
4287/* Assumes idc_lock always held on entry */
4288int
4289qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
4290{
4291 struct qla_hw_data *ha = base_vha->hw;
4292 int rval = QLA_SUCCESS;
4293 unsigned long dev_init_timeout;
4294 uint32_t dev_state;
4295
4296 /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
4297 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
4298
4299 while (1) {
4300
4301 if (time_after_eq(jiffies, dev_init_timeout)) {
4302 ql_log(ql_log_warn, base_vha, 0xb06e,
4303 "Initialization TIMEOUT!\n");
4304 /* Init timeout. Disable further NIC Core
4305 * communication.
4306 */
4307 qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
4308 QLA8XXX_DEV_FAILED);
4309 ql_log(ql_log_info, base_vha, 0xb06f,
4310 "HW State: FAILED.\n");
4311 }
4312
4313 qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
4314 switch (dev_state) {
4315 case QLA8XXX_DEV_READY:
4316 if (ha->flags.nic_core_reset_owner)
4317 qla83xx_idc_audit(base_vha,
4318 IDC_AUDIT_COMPLETION);
4319 ha->flags.nic_core_reset_owner = 0;
4320 ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
4321 "Reset_owner reset by 0x%x.\n",
4322 ha->portnum);
4323 goto exit;
4324 case QLA8XXX_DEV_COLD:
4325 if (ha->flags.nic_core_reset_owner)
4326 rval = qla83xx_device_bootstrap(base_vha);
4327 else {
4328 /* Wait for AEN to change device-state */
4329 qla83xx_idc_unlock(base_vha, 0);
4330 msleep(1000);
4331 qla83xx_idc_lock(base_vha, 0);
4332 }
4333 break;
4334 case QLA8XXX_DEV_INITIALIZING:
4335 /* Wait for AEN to change device-state */
4336 qla83xx_idc_unlock(base_vha, 0);
4337 msleep(1000);
4338 qla83xx_idc_lock(base_vha, 0);
4339 break;
4340 case QLA8XXX_DEV_NEED_RESET:
4341 if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
4342 qla83xx_need_reset_handler(base_vha);
4343 else {
4344 /* Wait for AEN to change device-state */
4345 qla83xx_idc_unlock(base_vha, 0);
4346 msleep(1000);
4347 qla83xx_idc_lock(base_vha, 0);
4348 }
4349 /* reset timeout value after need reset handler */
4350 dev_init_timeout = jiffies +
4351 (ha->fcoe_dev_init_timeout * HZ);
4352 break;
4353 case QLA8XXX_DEV_NEED_QUIESCENT:
4354 /* XXX: DEBUG for now */
4355 qla83xx_idc_unlock(base_vha, 0);
4356 msleep(1000);
4357 qla83xx_idc_lock(base_vha, 0);
4358 break;
4359 case QLA8XXX_DEV_QUIESCENT:
4360 /* XXX: DEBUG for now */
4361 if (ha->flags.quiesce_owner)
4362 goto exit;
4363
4364 qla83xx_idc_unlock(base_vha, 0);
4365 msleep(1000);
4366 qla83xx_idc_lock(base_vha, 0);
4367 dev_init_timeout = jiffies +
4368 (ha->fcoe_dev_init_timeout * HZ);
4369 break;
4370 case QLA8XXX_DEV_FAILED:
4371 if (ha->flags.nic_core_reset_owner)
4372 qla83xx_idc_audit(base_vha,
4373 IDC_AUDIT_COMPLETION);
4374 ha->flags.nic_core_reset_owner = 0;
4375 __qla83xx_clear_drv_presence(base_vha);
4376 qla83xx_idc_unlock(base_vha, 0);
4377 qla8xxx_dev_failed_handler(base_vha);
4378 rval = QLA_FUNCTION_FAILED;
4379 qla83xx_idc_lock(base_vha, 0);
4380 goto exit;
4381 case QLA8XXX_BAD_VALUE:
4382 qla83xx_idc_unlock(base_vha, 0);
4383 msleep(1000);
4384 qla83xx_idc_lock(base_vha, 0);
4385 break;
4386 default:
4387 ql_log(ql_log_warn, base_vha, 0xb071,
4388 "Unknow Device State: %x.\n", dev_state);
4389 qla83xx_idc_unlock(base_vha, 0);
4390 qla8xxx_dev_failed_handler(base_vha);
4391 rval = QLA_FUNCTION_FAILED;
4392 qla83xx_idc_lock(base_vha, 0);
4393 goto exit;
4394 }
4395 }
4396
4397exit:
4398 return rval;
4399}
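/*
 * State-machine summary of the handler above: READY and FAILED leave the
 * loop (FAILED also clears this function's presence bit and returns
 * QLA_FUNCTION_FAILED); COLD bootstraps the firmware when this function is
 * the nic_core_reset_owner; NEED_RESET runs the ack handshake for the reset
 * owner; every other state drops the IDC lock, sleeps one second and polls
 * again until fcoe_dev_init_timeout expires.
 */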
4400
 /**************************************************************************
 * qla2x00_do_dpc
 * This kernel thread is a task that is scheduled by the interrupt handler
@@ -3749,7 +4453,7 @@ qla2x00_do_dpc(void *data)
             &base_vha->dpc_flags)) {
             qla82xx_idc_lock(ha);
             qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                QLA82XX_DEV_FAILED);
+                QLA8XXX_DEV_FAILED);
             qla82xx_idc_unlock(ha);
             ql_log(ql_log_info, base_vha, 0x4004,
                 "HW State: FAILED.\n");
@@ -3819,14 +4523,21 @@ qla2x00_do_dpc(void *data)
         if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
             ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
                 "Quiescence mode scheduled.\n");
-            qla82xx_device_state_handler(base_vha);
-            clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
-            if (!ha->flags.quiesce_owner) {
-                qla2x00_perform_loop_resync(base_vha);
-
-                qla82xx_idc_lock(ha);
-                qla82xx_clear_qsnt_ready(base_vha);
-                qla82xx_idc_unlock(ha);
+            if (IS_QLA82XX(ha)) {
+                qla82xx_device_state_handler(base_vha);
+                clear_bit(ISP_QUIESCE_NEEDED,
+                    &base_vha->dpc_flags);
+                if (!ha->flags.quiesce_owner) {
+                    qla2x00_perform_loop_resync(base_vha);
+
+                    qla82xx_idc_lock(ha);
+                    qla82xx_clear_qsnt_ready(base_vha);
+                    qla82xx_idc_unlock(ha);
+                }
+            } else {
+                clear_bit(ISP_QUIESCE_NEEDED,
+                    &base_vha->dpc_flags);
+                qla2x00_quiesce_io(base_vha);
             }
             ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
                 "Quiescence mode end.\n");
@@ -4326,7 +5037,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
         qla82xx_idc_lock(ha);

         qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-            QLA82XX_DEV_INITIALIZING);
+            QLA8XXX_DEV_INITIALIZING);

         qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
             QLA82XX_IDC_VERSION);
@@ -4350,12 +5061,12 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
                 "HW State: FAILED.\n");
             qla82xx_clear_drv_active(ha);
             qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                QLA82XX_DEV_FAILED);
+                QLA8XXX_DEV_FAILED);
         } else {
             ql_log(ql_log_info, base_vha, 0x900c,
                 "HW State: READY.\n");
             qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                QLA82XX_DEV_READY);
+                QLA8XXX_DEV_READY);
             qla82xx_idc_unlock(ha);
             ha->flags.isp82xx_fw_hung = 0;
             rval = qla82xx_restart_isp(base_vha);
@@ -4370,7 +5081,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
             "This devfn is not reset owner = 0x%x.\n",
             ha->pdev->devfn);
         if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
-            QLA82XX_DEV_READY)) {
+            QLA8XXX_DEV_READY)) {
             ha->flags.isp82xx_fw_hung = 0;
             rval = qla82xx_restart_isp(base_vha);
             qla82xx_idc_lock(ha);
@@ -4495,6 +5206,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
+    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
     { 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index d70f0300898..892a81e457b 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index a683e766d1a..32fdc2a66dd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -966,16 +966,16 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
         QLA82XX_IDC_PARAM_ADDR , 8);

     if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
-        ha->nx_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
-        ha->nx_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
+        ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
+        ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
     } else {
-        ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
-        ha->nx_reset_timeout = le32_to_cpu(*wptr);
+        ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+        ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
     }
     ql_dbg(ql_dbg_init, vha, 0x004e,
-        "nx_dev_init_timeout=%d "
-        "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
-        ha->nx_reset_timeout);
+        "fcoe_dev_init_timeout=%d "
+        "fcoe_reset_timeout=%d.\n", ha->fcoe_dev_init_timeout,
+        ha->fcoe_reset_timeout);
     return;
 }

@@ -1017,7 +1017,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
         !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
         return;

-    if (ha->flags.isp82xx_reset_hdlr_active)
+    if (ha->flags.nic_core_reset_hdlr_active)
         return;

     ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1662,6 +1662,23 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }

+static uint32_t
+qla83xx_select_led_port(struct qla_hw_data *ha)
+{
+    uint32_t led_select_value = 0;
+
+    if (!IS_QLA83XX(ha))
+        goto out;
+
+    if (ha->flags.port0)
+        led_select_value = QLA83XX_LED_PORT0;
+    else
+        led_select_value = QLA83XX_LED_PORT1;
+
+out:
+    return led_select_value;
+}
+
 void
 qla83xx_beacon_blink(struct scsi_qla_host *vha)
 {
@@ -1669,22 +1686,34 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
     struct qla_hw_data *ha = vha->hw;
     uint16_t led_cfg[6];
     uint16_t orig_led_cfg[6];
+    uint32_t led_10_value, led_43_value;

     if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
         return;

-    if (IS_QLA2031(ha) && ha->beacon_blink_led) {
-        if (ha->flags.port0)
-            led_select_value = 0x00201320;
-        else
-            led_select_value = 0x00201328;
+    if (!ha->beacon_blink_led)
+        return;
+
+    if (IS_QLA2031(ha)) {
+        led_select_value = qla83xx_select_led_port(ha);

-        qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
-        qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
+        qla83xx_wr_reg(vha, led_select_value, 0x40002000);
+        qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
+        msleep(1000);
+        qla83xx_wr_reg(vha, led_select_value, 0x40004000);
+        qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
+    } else if (IS_QLA8031(ha)) {
+        led_select_value = qla83xx_select_led_port(ha);
+
+        qla83xx_rd_reg(vha, led_select_value, &led_10_value);
+        qla83xx_rd_reg(vha, led_select_value + 0x10, &led_43_value);
+        qla83xx_wr_reg(vha, led_select_value, 0x01f44000);
+        msleep(500);
+        qla83xx_wr_reg(vha, led_select_value, 0x400001f4);
         msleep(1000);
-        qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
-        qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
-    } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
+        qla83xx_wr_reg(vha, led_select_value, led_10_value);
+        qla83xx_wr_reg(vha, led_select_value + 0x10, led_43_value);
+    } else if (IS_QLA81XX(ha)) {
         int rval;

         /* Save Current */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f5fdb16bec9..cfe934e1af4 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,15 +1,15 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2012 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.04.00.03-k"
+#define QLA2XXX_VERSION      "8.04.00.07-k"

 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	4
 #define QLA_DRIVER_PATCH_VER	0
-#define QLA_DRIVER_BETA_VER	3
+#define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index f1ad02ea212..e4dc7c733c2 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -4,5 +4,5 @@ config SCSI_QLA_ISCSI
 	select SCSI_ISCSI_ATTRS
 	select ISCSI_BOOT_SYSFS
 	---help---
-	This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
-	iSCSI host adapter family.
+	This driver supports the QLogic 40xx (ISP4XXX), 8022 (ISP82XX)
+	and 8032 (ISP83XX) iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 5b44139ff43..4230977748c 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
 qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
-		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o
+		ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o ql4_83xx.o

 obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o

5 5
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
new file mode 100644
index 00000000000..6e9af20be12
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -0,0 +1,1611 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include <linux/ratelimit.h>
9
10#include "ql4_def.h"
11#include "ql4_version.h"
12#include "ql4_glbl.h"
13#include "ql4_dbg.h"
14#include "ql4_inline.h"
15
16uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
17{
18 return readl((void __iomem *)(ha->nx_pcibase + addr));
19}
20
21void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
22{
23 writel(val, (void __iomem *)(ha->nx_pcibase + addr));
24}
25
26static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
27{
28 uint32_t val;
29 int ret_val = QLA_SUCCESS;
30
31 qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
32 val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
33 if (val != addr) {
34 ql4_printk(KERN_ERR, ha, "%s: Failed to set register window: addr written 0x%x, read 0x%x!\n",
35 __func__, addr, val);
36 ret_val = QLA_ERROR;
37 }
38
39 return ret_val;
40}
41
42int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
43 uint32_t *data)
44{
45 int ret_val;
46
47 ret_val = qla4_83xx_set_win_base(ha, addr);
48
49 if (ret_val == QLA_SUCCESS)
50 *data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
51 else
52 ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
53 __func__, addr);
54
55 return ret_val;
56}
57
58int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
59 uint32_t data)
60{
61 int ret_val;
62
63 ret_val = qla4_83xx_set_win_base(ha, addr);
64
65 if (ret_val == QLA_SUCCESS)
66 qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
67 else
68 ql4_printk(KERN_ERR, ha, "%s: failed write to addr 0x%x, data 0x%x\n",
69 __func__, addr, data);
70
71 return ret_val;
72}
73
74static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
75{
76 int lock_owner;
77 int timeout = 0;
78 uint32_t lock_status = 0;
79 int ret_val = QLA_SUCCESS;
80
81 while (lock_status == 0) {
82 lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
83 if (lock_status)
84 break;
85
86 if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
87 lock_owner = qla4_83xx_rd_reg(ha,
88 QLA83XX_FLASH_LOCK_ID);
89 ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
90 __func__, ha->func_num, lock_owner);
91 ret_val = QLA_ERROR;
92 break;
93 }
94 msleep(20);
95 }
96
97 qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
98 return ret_val;
99}
100
101static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
102{
103 /* Reading FLASH_UNLOCK register unlocks the Flash */
104 qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
105 qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
106}
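/*
 * The flash semaphore is driven purely by register reads: reading
 * QLA83XX_FLASH_LOCK returns non-zero once the hardware grants the lock,
 * and reading QLA83XX_FLASH_UNLOCK releases it.  QLA83XX_FLASH_LOCK_ID only
 * records which function holds the lock, for the diagnostics above.
 */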
107
108int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
109 uint8_t *p_data, int u32_word_count)
110{
111 int i;
112 uint32_t u32_word;
113 uint32_t addr = flash_addr;
114 int ret_val = QLA_SUCCESS;
115
116 ret_val = qla4_83xx_flash_lock(ha);
117 if (ret_val == QLA_ERROR)
118 goto exit_lock_error;
119
120 if (addr & 0x03) {
121 ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
122 __func__, addr);
123 ret_val = QLA_ERROR;
124 goto exit_flash_read;
125 }
126
127 for (i = 0; i < u32_word_count; i++) {
128 ret_val = qla4_83xx_wr_reg_indirect(ha,
129 QLA83XX_FLASH_DIRECT_WINDOW,
130 (addr & 0xFFFF0000));
131 if (ret_val == QLA_ERROR) {
132 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
133 __func__, addr);
134 goto exit_flash_read;
135 }
136
137 ret_val = qla4_83xx_rd_reg_indirect(ha,
138 QLA83XX_FLASH_DIRECT_DATA(addr),
139 &u32_word);
140 if (ret_val == QLA_ERROR) {
141 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
142 __func__, addr);
143 goto exit_flash_read;
144 }
145
146 *(__le32 *)p_data = le32_to_cpu(u32_word);
147 p_data = p_data + 4;
148 addr = addr + 4;
149 }
150
151exit_flash_read:
152 qla4_83xx_flash_unlock(ha);
153
154exit_lock_error:
155 return ret_val;
156}
157
158int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
159 uint32_t flash_addr, uint8_t *p_data,
160 int u32_word_count)
161{
162 uint32_t i;
163 uint32_t u32_word;
164 uint32_t flash_offset;
165 uint32_t addr = flash_addr;
166 int ret_val = QLA_SUCCESS;
167
168 flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);
169
170 if (addr & 0x3) {
171 ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
172 __func__, addr);
173 ret_val = QLA_ERROR;
174 goto exit_lockless_read;
175 }
176
177 ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
178 addr);
179 if (ret_val == QLA_ERROR) {
180 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
181 __func__, addr);
182 goto exit_lockless_read;
183 }
184
185 /* Check if data is spread across multiple sectors */
186 if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
187 (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
188
189 /* Multi sector read */
190 for (i = 0; i < u32_word_count; i++) {
191 ret_val = qla4_83xx_rd_reg_indirect(ha,
192 QLA83XX_FLASH_DIRECT_DATA(addr),
193 &u32_word);
194 if (ret_val == QLA_ERROR) {
195 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
196 __func__, addr);
197 goto exit_lockless_read;
198 }
199
200 *(__le32 *)p_data = le32_to_cpu(u32_word);
201 p_data = p_data + 4;
202 addr = addr + 4;
203 flash_offset = flash_offset + 4;
204
205 if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
206 /* This write is needed once for each sector */
207 ret_val = qla4_83xx_wr_reg_indirect(ha,
208 QLA83XX_FLASH_DIRECT_WINDOW,
209 addr);
210 if (ret_val == QLA_ERROR) {
211 ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
212 __func__, addr);
213 goto exit_lockless_read;
214 }
215 flash_offset = 0;
216 }
217 }
218 } else {
219 /* Single sector read */
220 for (i = 0; i < u32_word_count; i++) {
221 ret_val = qla4_83xx_rd_reg_indirect(ha,
222 QLA83XX_FLASH_DIRECT_DATA(addr),
223 &u32_word);
224 if (ret_val == QLA_ERROR) {
225 ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
226 __func__, addr);
227 goto exit_lockless_read;
228 }
229
230 *(__le32 *)p_data = le32_to_cpu(u32_word);
231 p_data = p_data + 4;
232 addr = addr + 4;
233 }
234 }
235
236exit_lockless_read:
237 return ret_val;
238}
239
240void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
241{
242 if (qla4_83xx_flash_lock(ha))
243 ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);
244
245 /*
246 * Either we got the lock, or someone else is holding it;
247 * since we are resetting, forcefully unlock.
248 */
249 qla4_83xx_flash_unlock(ha);
250}
251
252/**
253 * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
254 * @ha: Pointer to adapter structure
255 * @addr: Flash address to write to
256 * @data: Data to be written
257 * @count: word_count to be written
258 *
259 * Return: On success return QLA_SUCCESS
260 * On error return QLA_ERROR
261 **/
262static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
263 uint32_t *data, uint32_t count)
264{
265 int i, j;
266 uint32_t agt_ctrl;
267 unsigned long flags;
268 int ret_val = QLA_SUCCESS;
269
270 /* Only 128-bit aligned access */
271 if (addr & 0xF) {
272 ret_val = QLA_ERROR;
273 goto exit_ms_mem_write;
274 }
275
276 write_lock_irqsave(&ha->hw_lock, flags);
277
278 /* Write address */
279 ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
280 if (ret_val == QLA_ERROR) {
281 ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
282 __func__);
283 goto exit_ms_mem_write_unlock;
284 }
285
286 for (i = 0; i < count; i++, addr += 16) {
287 if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
288 QLA8XXX_ADDR_QDR_NET_MAX)) ||
289 (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
290 QLA8XXX_ADDR_DDR_NET_MAX)))) {
291 ret_val = QLA_ERROR;
292 goto exit_ms_mem_write_unlock;
293 }
294
295 ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
296 addr);
297 /* Write data */
298 ret_val |= qla4_83xx_wr_reg_indirect(ha,
299 MD_MIU_TEST_AGT_WRDATA_LO,
300 *data++);
301 ret_val |= qla4_83xx_wr_reg_indirect(ha,
302 MD_MIU_TEST_AGT_WRDATA_HI,
303 *data++);
304 ret_val |= qla4_83xx_wr_reg_indirect(ha,
305 MD_MIU_TEST_AGT_WRDATA_ULO,
306 *data++);
307 ret_val |= qla4_83xx_wr_reg_indirect(ha,
308 MD_MIU_TEST_AGT_WRDATA_UHI,
309 *data++);
310 if (ret_val == QLA_ERROR) {
311 ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
312 __func__);
313 goto exit_ms_mem_write_unlock;
314 }
315
316 /* Check write status */
317 ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
318 MIU_TA_CTL_WRITE_ENABLE);
319 ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
320 MIU_TA_CTL_WRITE_START);
321 if (ret_val == QLA_ERROR) {
322 ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
323 __func__);
324 goto exit_ms_mem_write_unlock;
325 }
326
327 for (j = 0; j < MAX_CTL_CHECK; j++) {
328 ret_val = qla4_83xx_rd_reg_indirect(ha,
329 MD_MIU_TEST_AGT_CTRL,
330 &agt_ctrl);
331 if (ret_val == QLA_ERROR) {
332 ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
333 __func__);
334 goto exit_ms_mem_write_unlock;
335 }
336 if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
337 break;
338 }
339
340 /* Status check failed */
341 if (j >= MAX_CTL_CHECK) {
342 printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
343 __func__);
344 ret_val = QLA_ERROR;
345 goto exit_ms_mem_write_unlock;
346 }
347 }
348
349exit_ms_mem_write_unlock:
350 write_unlock_irqrestore(&ha->hw_lock, flags);
351
352exit_ms_mem_write:
353 return ret_val;
354}
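/*
 * Write-protocol recap for the function above: each 16-byte beat is issued
 * as four 32-bit writes to the WRDATA_{LO,HI,ULO,UHI} registers, then
 * WRITE_ENABLE/WRITE_START are pulsed through MD_MIU_TEST_AGT_CTRL and the
 * BUSY bit is polled up to MAX_CTL_CHECK times.  The "addr & 0xF" test
 * enforces the 128-bit alignment the memory agent requires.
 */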
355
356#define INTENT_TO_RECOVER 0x01
357#define PROCEED_TO_RECOVER 0x02
358
359static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
360{
361
362 uint32_t lock = 0, lockid;
363 int ret_val = QLA_ERROR;
364
365 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
366
367 /* Check for other Recovery in progress, go wait */
368 if ((lockid & 0x3) != 0)
369 goto exit_lock_recovery;
370
371 /* Intent to Recover */
372 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
373 (ha->func_num << 2) | INTENT_TO_RECOVER);
374
375 msleep(200);
376
377 /* Check Intent to Recover is advertised */
378 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
379 if ((lockid & 0x3C) != (ha->func_num << 2))
380 goto exit_lock_recovery;
381
382 ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
383 __func__, ha->func_num);
384
385 /* Proceed to Recover */
386 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
387 (ha->func_num << 2) | PROCEED_TO_RECOVER);
388
389 /* Force Unlock */
390 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
391 ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);
392
393 /* Clear bits 0-5 in IDC_RECOVERY register*/
394 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);
395
396 /* Get lock */
397 lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
398 if (lock) {
399 lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
400 lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
401 ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
402 ret_val = QLA_SUCCESS;
403 }
404
405exit_lock_recovery:
406 return ret_val;
407}
408
409#define QLA83XX_DRV_LOCK_MSLEEP 200
410
411int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
412{
413 int timeout = 0;
414 uint32_t status = 0;
415 int ret_val = QLA_SUCCESS;
416 uint32_t first_owner = 0;
417 uint32_t tmo_owner = 0;
418 uint32_t lock_id;
419 uint32_t func_num;
420 uint32_t lock_cnt;
421
422 while (status == 0) {
423 status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
424 if (status) {
425 /* Increment Counter (8-31) and update func_num (0-7) on
426 * getting a successful lock */
427 lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
428 lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
429 qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
430 break;
431 }
432
433 if (timeout == 0)
434 /* Save counter + ID of function holding the lock for
435 * first failure */
436 first_owner = ha->isp_ops->rd_reg_direct(ha,
437 QLA83XX_DRV_LOCK_ID);
438
439 if (++timeout >=
440 (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
441 tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
442 func_num = tmo_owner & 0xFF;
443 lock_cnt = tmo_owner >> 8;
444 ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
445 __func__, ha->func_num, func_num, lock_cnt,
446 (first_owner & 0xFF));
447
448 if (first_owner != tmo_owner) {
449 /* Some other driver got lock, OR same driver
450 * got lock again (counter value changed), when
451 * we were waiting for lock.
452 * Retry for another 2 sec */
453 ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
454 __func__, ha->func_num);
455 timeout = 0;
456 } else {
457 /* Same driver holding lock > 2sec.
458 * Force Recovery */
459 ret_val = qla4_83xx_lock_recovery(ha);
460 if (ret_val == QLA_SUCCESS) {
461 /* Recovered and got lock */
462 ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
463 __func__, ha->func_num);
464 break;
465 }
466 /* Recovery Failed, some other function
467 * has the lock, wait for 2secs and retry */
468 ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
469 __func__, ha->func_num);
470 timeout = 0;
471 }
472 }
473 msleep(QLA83XX_DRV_LOCK_MSLEEP);
474 }
475
476 return ret_val;
477}
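/*
 * DRV_LOCK_ID encoding: bits 0-7 hold the owning func_num, bits 8-31 an
 * acquisition counter.  Worked example (hypothetical values): if DRV_LOCK_ID
 * reads 0x0305 (count 3, func 5) and func 2 then acquires the lock,
 * ((0x0305 + (1 << 8)) & ~0xFF) | 2 == 0x0402, i.e. count 4, owner 2.  A
 * changed counter is how the timeout path distinguishes "same owner stuck
 * for more than 2s" from "the lock simply changed hands".
 */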
478
479void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
480{
481 int id;
482
483 id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
484
485 if ((id & 0xFF) != ha->func_num) {
486 ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
487 __func__, ha->func_num, (id & 0xFF));
488 return;
489 }
490
491 /* Keep lock counter value, update the ha->func_num to 0xFF */
492 qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
493 qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
494}
495
496void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
497{
498 uint32_t idc_ctrl;
499
500 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
501 idc_ctrl |= DONTRESET_BIT0;
502 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
503 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
504 idc_ctrl));
505}
506
507void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
508{
509 uint32_t idc_ctrl;
510
511 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
512 idc_ctrl &= ~DONTRESET_BIT0;
513 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
514 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
515 idc_ctrl));
516}
517
518int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
519{
520 uint32_t idc_ctrl;
521
522 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
523 return idc_ctrl & DONTRESET_BIT0;
524}
525
526/*-------------------------IDC State Machine ---------------------*/
527
528enum {
529 UNKNOWN_CLASS = 0,
530 NIC_CLASS,
531 FCOE_CLASS,
532 ISCSI_CLASS
533};
534
535struct device_info {
536 int func_num;
537 int device_type;
538 int port_num;
539};
540
541static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
542{
543 uint32_t drv_active;
544 uint32_t dev_part, dev_part1, dev_part2;
545 int i;
546 struct device_info device_map[16];
547 int func_nibble;
548 int nibble;
549 int nic_present = 0;
550 int iscsi_present = 0;
551 int iscsi_func_low = 0;
552
553 /* Use the dev_partition register to determine the PCI function number
554 * and then check drv_active register to see which driver is loaded */
555 dev_part1 = qla4_83xx_rd_reg(ha,
556 ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
557 dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
558 drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
559
560 /* Each function has 4 bits in dev_partition Info register,
561 * Lower 2 bits - device type, Upper 2 bits - physical port number */
562 dev_part = dev_part1;
563 for (i = nibble = 0; i <= 15; i++, nibble++) {
564 func_nibble = dev_part & (0xF << (nibble * 4));
565 func_nibble >>= (nibble * 4);
566 device_map[i].func_num = i;
567 device_map[i].device_type = func_nibble & 0x3;
568 device_map[i].port_num = func_nibble & 0xC;
569
570 if (device_map[i].device_type == NIC_CLASS) {
571 if (drv_active & (1 << device_map[i].func_num)) {
572 nic_present++;
573 break;
574 }
575 } else if (device_map[i].device_type == ISCSI_CLASS) {
576 if (drv_active & (1 << device_map[i].func_num)) {
577 if (!iscsi_present ||
578 (iscsi_present &&
579 (iscsi_func_low > device_map[i].func_num)))
580 iscsi_func_low = device_map[i].func_num;
581
582 iscsi_present++;
583 }
584 }
585
586 /* For function_num[8..15] get info from dev_part2 register */
587 if (nibble == 7) {
588 nibble = 0;
589 dev_part = dev_part2;
590 }
591 }
592
593 /* NIC, iSCSI and FCOE take Reset ownership in that order of precedence
594 * (NIC over iSCSI over FCOE), depending on which drivers are
595 * present. */
596 if (!nic_present && (ha->func_num == iscsi_func_low)) {
597 DEBUG2(ql4_printk(KERN_INFO, ha,
598 "%s: can reset - NIC not present and lower iSCSI function is %d\n",
599 __func__, ha->func_num));
600 return 1;
601 }
602
603 return 0;
604}
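/*
 * dev_partition decoding example (hypothetical register value): each
 * function owns one nibble, low 2 bits = device class, upper 2 bits = port.
 * If function 1's nibble is 0x7 (binary 0111), device_type = 0x7 & 0x3 = 3
 * (ISCSI_CLASS) and port_num = 0x7 & 0xC = 0x4.  Functions 8-15 are decoded
 * from the second DEV_PART_INFO2 register once the nibble index wraps past 7.
 */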
605
606/**
607 * qla4_83xx_need_reset_handler - Code to start reset sequence
608 * @ha: pointer to adapter structure
609 *
610 * Note: IDC lock must be held upon entry
611 **/
612void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
613{
614 uint32_t dev_state, drv_state, drv_active;
615 unsigned long reset_timeout, dev_init_timeout;
616
617 ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
618 __func__);
619
620 if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
621 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
622 __func__));
623 qla4_8xxx_set_rst_ready(ha);
624
625 /* Non-reset owners ACK Reset and wait for device INIT state
626 * as part of Reset Recovery by Reset Owner */
627 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
628
629 do {
630 if (time_after_eq(jiffies, dev_init_timeout)) {
631 ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
632 __func__);
633 break;
634 }
635
636 ha->isp_ops->idc_unlock(ha);
637 msleep(1000);
638 ha->isp_ops->idc_lock(ha);
639
640 dev_state = qla4_8xxx_rd_direct(ha,
641 QLA8XXX_CRB_DEV_STATE);
642 } while (dev_state == QLA8XXX_DEV_NEED_RESET);
643 } else {
644 qla4_8xxx_set_rst_ready(ha);
645 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
646 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
647 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
648
649 ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
650 __func__, drv_state, drv_active);
651
652 while (drv_state != drv_active) {
653 if (time_after_eq(jiffies, reset_timeout)) {
654 ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
655 __func__, DRIVER_NAME, drv_state,
656 drv_active);
657 break;
658 }
659
660 ha->isp_ops->idc_unlock(ha);
661 msleep(1000);
662 ha->isp_ops->idc_lock(ha);
663
664 drv_state = qla4_8xxx_rd_direct(ha,
665 QLA8XXX_CRB_DRV_STATE);
666 drv_active = qla4_8xxx_rd_direct(ha,
667 QLA8XXX_CRB_DRV_ACTIVE);
668 }
669
670 if (drv_state != drv_active) {
671 ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
672 __func__, (drv_active ^ drv_state));
673 drv_active = drv_active & drv_state;
674 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
675 drv_active);
676 }
677
678 clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
679 /* Start Reset Recovery */
680 qla4_8xxx_device_bootstrap(ha);
681 }
682}
683
684void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
685{
686 uint32_t idc_params, ret_val;
687
688 ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
689 (uint8_t *)&idc_params, 1);
690 if (ret_val == QLA_SUCCESS) {
691 ha->nx_dev_init_timeout = idc_params & 0xFFFF;
692 ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
693 } else {
694 ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
695 ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
696 }
697
698 DEBUG2(ql4_printk(KERN_DEBUG, ha,
699 "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
700 __func__, ha->nx_dev_init_timeout,
701 ha->nx_reset_timeout));
702}
703
704/*-------------------------Reset Sequence Functions-----------------------*/
705
706static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
707{
708 uint8_t *phdr;
709
710 if (!ha->reset_tmplt.buff) {
711 ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
712 __func__);
713 return;
714 }
715
716 phdr = ha->reset_tmplt.buff;
717
718 DEBUG2(ql4_printk(KERN_INFO, ha,
719 "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
720 *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
721 *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
722 *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
723 *(phdr+13), *(phdr+14), *(phdr+15)));
724}
725
726static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
727{
728 uint8_t *p_cache;
729 uint32_t src, count, size;
730 uint64_t dest;
731 int ret_val = QLA_SUCCESS;
732
733 src = QLA83XX_BOOTLOADER_FLASH_ADDR;
734 dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
735 size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);
736
737 /* 128 bit alignment check */
738 if (size & 0xF)
739 size = (size + 16) & ~0xF;
740
741 /* 16 byte count */
742 count = size/16;
743
744 p_cache = vmalloc(size);
745 if (p_cache == NULL) {
746 ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
747 __func__);
748 ret_val = QLA_ERROR;
749 goto exit_copy_bootloader;
750 }
751
752 ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
753 size / sizeof(uint32_t));
754 if (ret_val == QLA_ERROR) {
755 ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
756 __func__);
757 goto exit_copy_error;
758 }
759 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
760 __func__));
761
762 /* 128 bit/16 byte write to MS memory */
763 ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
764 count);
765 if (ret_val == QLA_ERROR) {
766 ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
767 __func__);
768 goto exit_copy_error;
769 }
770
771 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
772 __func__, size));
773
774exit_copy_error:
775 vfree(p_cache);
776
777exit_copy_bootloader:
778 return ret_val;
779}
780
781static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
782{
783 uint32_t val, ret_val = QLA_ERROR;
784 int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
785
786 do {
787 val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
788 if (val == PHAN_INITIALIZE_COMPLETE) {
789 DEBUG2(ql4_printk(KERN_INFO, ha,
790 "%s: Command Peg initialization complete. State=0x%x\n",
791 __func__, val));
792 ret_val = QLA_SUCCESS;
793 break;
794 }
795 msleep(CRB_CMDPEG_CHECK_DELAY);
796 } while (--retries);
797
798 return ret_val;
799}
800
801/**
802 * qla4_83xx_poll_reg - Poll the given CRB addr for up to "duration" msecs
803 * until the value read, ANDed with test_mask, equals test_result.
804 *
805 * @ha : Pointer to adapter structure
806 * @addr : CRB register address
807 * @duration : Poll for a total of "duration" msecs
808 * @test_mask : Mask to AND with the value read
809 * @test_result : Compare (value & test_mask) with test_result.
810 **/
811static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
812 int duration, uint32_t test_mask,
813 uint32_t test_result)
814{
815 uint32_t value;
816 uint8_t retries;
817 int ret_val = QLA_SUCCESS;
818
819 ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
820 if (ret_val == QLA_ERROR)
821 goto exit_poll_reg;
822
823 retries = duration / 10;
824 do {
825 if ((value & test_mask) != test_result) {
826 msleep(duration / 10);
827 ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
828 if (ret_val == QLA_ERROR)
829 goto exit_poll_reg;
830
831 ret_val = QLA_ERROR;
832 } else {
833 ret_val = QLA_SUCCESS;
834 break;
835 }
836 } while (retries--);
837
838exit_poll_reg:
839 if (ret_val == QLA_ERROR) {
840 ha->reset_tmplt.seq_error++;
841 ql4_printk(KERN_ERR, ha, "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
842 __func__, value, test_mask, test_result);
843 }
844
845 return ret_val;
846}
847
848static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
849{
850 uint32_t sum = 0;
851 uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
852 int u16_count = ha->reset_tmplt.hdr->size / sizeof(uint16_t);
853 int ret_val;
854
855 while (u16_count-- > 0)
856 sum += *buff++;
857
858 while (sum >> 16)
859 sum = (sum & 0xFFFF) + (sum >> 16);
860
861 /* checksum of 0 indicates a valid template */
862 if (~sum) {
863 ret_val = QLA_SUCCESS;
864 } else {
865 ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
866 __func__);
867 ret_val = QLA_ERROR;
868 }
869
870 return ret_val;
871}
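/*
 * Folding example (hypothetical sum): the 32-bit running sum is reduced to
 * 16 bits by adding the carries back in, e.g. sum = 0x2A7F3 folds to
 * 0xA7F3 + 0x2 = 0xA7F5.  This is the usual ones'-complement
 * (Internet-checksum style) fold over the template's 16-bit words.
 */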
872
873/**
874 * qla4_83xx_read_reset_template - Read Reset Template from Flash
875 * @ha: Pointer to adapter structure
876 **/
877void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
878{
879 uint8_t *p_buff;
880 uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
881 uint32_t ret_val;
882
883 ha->reset_tmplt.seq_error = 0;
884 ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
885 if (ha->reset_tmplt.buff == NULL) {
886 ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
887 __func__);
888 goto exit_read_reset_template;
889 }
890
891 p_buff = ha->reset_tmplt.buff;
892 addr = QLA83XX_RESET_TEMPLATE_ADDR;
893
894 tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
895 sizeof(uint32_t);
896
897 DEBUG2(ql4_printk(KERN_INFO, ha,
898 "%s: Read template hdr size %d from Flash\n",
899 __func__, tmplt_hdr_def_size));
900
901 /* Copy template header from flash */
902 ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
903 tmplt_hdr_def_size);
904 if (ret_val != QLA_SUCCESS) {
905 ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
906 __func__);
907 goto exit_read_template_error;
908 }
909
910 ha->reset_tmplt.hdr =
911 (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;
912
913 /* Validate the template header size and signature */
914 tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
915 if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
916 (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
917 ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
918 __func__, tmplt_hdr_size, tmplt_hdr_def_size);
919 goto exit_read_template_error;
920 }
921
922 addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
923 p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
924 tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
925 ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);
926
927 DEBUG2(ql4_printk(KERN_INFO, ha,
928 "%s: Read rest of the template size %d\n",
929 __func__, ha->reset_tmplt.hdr->size));
930
931 /* Copy rest of the template */
932 ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
933 tmplt_hdr_def_size);
934 if (ret_val != QLA_SUCCESS) {
935 ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
936 __func__);
937 goto exit_read_template_error;
938 }
939
940 /* Integrity check */
941 if (qla4_83xx_reset_seq_checksum_test(ha)) {
942 ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
943 __func__);
944 goto exit_read_template_error;
945 }
946 DEBUG2(ql4_printk(KERN_INFO, ha,
947 "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
948 __func__));
949
950 /* Get STOP, START, INIT sequence offsets */
951 ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
952 ha->reset_tmplt.hdr->init_seq_offset;
953 ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
954 ha->reset_tmplt.hdr->start_seq_offset;
955 ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
956 ha->reset_tmplt.hdr->hdr_size;
957 qla4_83xx_dump_reset_seq_hdr(ha);
958
959 goto exit_read_reset_template;
960
961exit_read_template_error:
962 vfree(ha->reset_tmplt.buff);
963
964exit_read_reset_template:
965 return;
966}
967
968/**
969 * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
970 *
971 * @ha : Pointer to adapter structure
972 * @raddr : CRB address to read from
973 * @waddr : CRB address to write to
974 **/
975static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
976 uint32_t raddr, uint32_t waddr)
977{
978 uint32_t value;
979
980 qla4_83xx_rd_reg_indirect(ha, raddr, &value);
981 qla4_83xx_wr_reg_indirect(ha, waddr, value);
982}
983
984/**
985 * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
986 *
987 * This function reads a value from raddr, ANDs it with test_mask, shifts
988 * it left/right, ORs/XORs it with the RMW header values, and writes the result to waddr.
989 *
990 * @ha : Pointer to adapter structure
991 * @raddr : CRB address to read from
992 * @waddr : CRB address to write to
993 * @p_rmw_hdr : header with shift/or/xor values.
994 **/
995static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
996 uint32_t waddr,
997 struct qla4_83xx_rmw *p_rmw_hdr)
998{
999 uint32_t value;
1000
1001 if (p_rmw_hdr->index_a)
1002 value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
1003 else
1004 qla4_83xx_rd_reg_indirect(ha, raddr, &value);
1005
1006 value &= p_rmw_hdr->test_mask;
1007 value <<= p_rmw_hdr->shl;
1008 value >>= p_rmw_hdr->shr;
1009 value |= p_rmw_hdr->or_value;
1010 value ^= p_rmw_hdr->xor_value;
1011
1012 qla4_83xx_wr_reg_indirect(ha, waddr, value);
1013
1014 return;
1015}
1016
1017static void qla4_83xx_write_list(struct scsi_qla_host *ha,
1018 struct qla4_83xx_reset_entry_hdr *p_hdr)
1019{
1020 struct qla4_83xx_entry *p_entry;
1021 uint32_t i;
1022
1023 p_entry = (struct qla4_83xx_entry *)
1024 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1025
1026 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1027 qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
1028 if (p_hdr->delay)
1029 udelay((uint32_t)(p_hdr->delay));
1030 }
1031}
1032
1033static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
1034 struct qla4_83xx_reset_entry_hdr *p_hdr)
1035{
1036 struct qla4_83xx_entry *p_entry;
1037 uint32_t i;
1038
1039 p_entry = (struct qla4_83xx_entry *)
1040 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1041
1042 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1043 qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
1044 if (p_hdr->delay)
1045 udelay((uint32_t)(p_hdr->delay));
1046 }
1047}
1048
1049static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
1050 struct qla4_83xx_reset_entry_hdr *p_hdr)
1051{
1052 long delay;
1053 struct qla4_83xx_entry *p_entry;
1054 struct qla4_83xx_poll *p_poll;
1055 uint32_t i;
1056 uint32_t value;
1057
1058 p_poll = (struct qla4_83xx_poll *)
1059 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1060
1061 /* Entries start after the 8-byte qla4_83xx_poll header, which
1062 * contains the test_mask and test_value. */
1063 p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
1064 sizeof(struct qla4_83xx_poll));
1065
1066 delay = (long)p_hdr->delay;
1067 if (!delay) {
1068 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1069 qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
1070 p_poll->test_mask,
1071 p_poll->test_value);
1072 }
1073 } else {
1074 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1075 if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
1076 p_poll->test_mask,
1077 p_poll->test_value)) {
1078 qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
1079 &value);
1080 qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
1081 &value);
1082 }
1083 }
1084 }
1085}
1086
1087static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
1088 struct qla4_83xx_reset_entry_hdr *p_hdr)
1089{
1090 long delay;
1091 struct qla4_83xx_quad_entry *p_entry;
1092 struct qla4_83xx_poll *p_poll;
1093 uint32_t i;
1094
1095 p_poll = (struct qla4_83xx_poll *)
1096 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1097 p_entry = (struct qla4_83xx_quad_entry *)
1098 ((char *)p_poll + sizeof(struct qla4_83xx_poll));
1099 delay = (long)p_hdr->delay;
1100
1101 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1102 qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
1103 p_entry->dr_value);
1104 qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1105 p_entry->ar_value);
1106 if (delay) {
1107 if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1108 p_poll->test_mask,
1109 p_poll->test_value)) {
1110 DEBUG2(ql4_printk(KERN_INFO, ha,
1111 "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
1112 __func__, i,
1113 ha->reset_tmplt.seq_index));
1114 }
1115 }
1116 }
1117}
1118
1119static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
1120 struct qla4_83xx_reset_entry_hdr *p_hdr)
1121{
1122 struct qla4_83xx_entry *p_entry;
1123 struct qla4_83xx_rmw *p_rmw_hdr;
1124 uint32_t i;
1125
1126 p_rmw_hdr = (struct qla4_83xx_rmw *)
1127 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1128 p_entry = (struct qla4_83xx_entry *)
1129 ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
1130
1131 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1132 qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
1133 p_rmw_hdr);
1134 if (p_hdr->delay)
1135 udelay((uint32_t)(p_hdr->delay));
1136 }
1137}
1138
1139static void qla4_83xx_pause(struct scsi_qla_host *ha,
1140 struct qla4_83xx_reset_entry_hdr *p_hdr)
1141{
1142 if (p_hdr->delay)
1143 mdelay((uint32_t)((long)p_hdr->delay));
1144}
1145
1146static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
1147 struct qla4_83xx_reset_entry_hdr *p_hdr)
1148{
1149 long delay;
1150 int index;
1151 struct qla4_83xx_quad_entry *p_entry;
1152 struct qla4_83xx_poll *p_poll;
1153 uint32_t i;
1154 uint32_t value;
1155
1156 p_poll = (struct qla4_83xx_poll *)
1157 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1158 p_entry = (struct qla4_83xx_quad_entry *)
1159 ((char *)p_poll + sizeof(struct qla4_83xx_poll));
1160 delay = (long)p_hdr->delay;
1161
1162 for (i = 0; i < p_hdr->count; i++, p_entry++) {
1163 qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
1164 p_entry->ar_value);
1165 if (delay) {
1166 if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
1167 p_poll->test_mask,
1168 p_poll->test_value)) {
1169 DEBUG2(ql4_printk(KERN_INFO, ha,
1170 "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
1171 __func__, i,
1172 ha->reset_tmplt.seq_index));
1173 } else {
1174 index = ha->reset_tmplt.array_index;
1175 qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
1176 &value);
1177 ha->reset_tmplt.array[index++] = value;
1178
1179 if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
1180 ha->reset_tmplt.array_index = 1;
1181 }
1182 }
1183 }
1184}
1185
1186static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
1187 struct qla4_83xx_reset_entry_hdr *p_hdr)
1188{
1189 ha->reset_tmplt.seq_end = 1;
1190}
1191
1192static void qla4_83xx_template_end(struct scsi_qla_host *ha,
1193 struct qla4_83xx_reset_entry_hdr *p_hdr)
1194{
1195 ha->reset_tmplt.template_end = 1;
1196
1197 if (ha->reset_tmplt.seq_error == 0) {
1198 DEBUG2(ql4_printk(KERN_INFO, ha,
1199 "%s: Reset sequence completed SUCCESSFULLY.\n",
1200 __func__));
1201 } else {
1202 ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
1203 __func__);
1204 }
1205}
1206
1207/**
1208 * qla4_83xx_process_reset_template - Process reset template.
1209 *
1210 * Process all entries in reset template till entry with SEQ_END opcode,
1211 * which indicates end of the reset template processing. Each entry has a
1212 * Reset Entry header, entry opcode/command, with size of the entry, number
1213 * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
1214 *
1215 * @ha : Pointer to adapter structure
1216 * @p_buff : Common reset entry header.
1217 **/
1218static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
1219 char *p_buff)
1220{
1221 int index, entries;
1222 struct qla4_83xx_reset_entry_hdr *p_hdr;
1223 char *p_entry = p_buff;
1224
1225 ha->reset_tmplt.seq_end = 0;
1226 ha->reset_tmplt.template_end = 0;
1227 entries = ha->reset_tmplt.hdr->entries;
1228 index = ha->reset_tmplt.seq_index;
1229
1230 for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) {
1231
1232 p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
1233 switch (p_hdr->cmd) {
1234 case OPCODE_NOP:
1235 break;
1236 case OPCODE_WRITE_LIST:
1237 qla4_83xx_write_list(ha, p_hdr);
1238 break;
1239 case OPCODE_READ_WRITE_LIST:
1240 qla4_83xx_read_write_list(ha, p_hdr);
1241 break;
1242 case OPCODE_POLL_LIST:
1243 qla4_83xx_poll_list(ha, p_hdr);
1244 break;
1245 case OPCODE_POLL_WRITE_LIST:
1246 qla4_83xx_poll_write_list(ha, p_hdr);
1247 break;
1248 case OPCODE_READ_MODIFY_WRITE:
1249 qla4_83xx_read_modify_write(ha, p_hdr);
1250 break;
1251 case OPCODE_SEQ_PAUSE:
1252 qla4_83xx_pause(ha, p_hdr);
1253 break;
1254 case OPCODE_SEQ_END:
1255 qla4_83xx_seq_end(ha, p_hdr);
1256 break;
1257 case OPCODE_TMPL_END:
1258 qla4_83xx_template_end(ha, p_hdr);
1259 break;
1260 case OPCODE_POLL_READ_LIST:
1261 qla4_83xx_poll_read_list(ha, p_hdr);
1262 break;
1263 default:
1264 ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
1265 __func__, p_hdr->cmd, index);
1266 break;
1267 }
1268
1269 /* Set pointer to next entry in the sequence. */
1270 p_entry += p_hdr->size;
1271 }
1272
1273 ha->reset_tmplt.seq_index = index;
1274}
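/*
 * Template-layout recap: every entry begins with a qla4_83xx_reset_entry_hdr
 * (cmd, size, count, delay).  The dispatcher above switches on cmd and then
 * advances by p_hdr->size, so an unknown opcode is logged and skipped
 * without derailing the walk; SEQ_END terminates one sub-sequence and
 * TMPL_END the whole template.
 */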
1275
1276static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
1277{
1278 ha->reset_tmplt.seq_index = 0;
1279 qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);
1280
1281 if (ha->reset_tmplt.seq_end != 1)
1282 ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
1283 __func__);
1284}
1285
1286static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
1287{
1288 qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);
1289
1290 if (ha->reset_tmplt.template_end != 1)
1291 ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
1292 __func__);
1293}
1294
1295static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
1296{
1297 qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);
1298
1299 if (ha->reset_tmplt.seq_end != 1)
1300 ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
1301 __func__);
1302}
1303
1304static int qla4_83xx_restart(struct scsi_qla_host *ha)
1305{
1306 int ret_val = QLA_SUCCESS;
1307
1308 qla4_83xx_process_stop_seq(ha);
1309
1310 /* Collect minidump*/
1311 if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags))
1312 qla4_8xxx_get_minidump(ha);
1313
1314 qla4_83xx_process_init_seq(ha);
1315
1316 if (qla4_83xx_copy_bootloader(ha)) {
1317 ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
1318 __func__);
1319 ret_val = QLA_ERROR;
1320 goto exit_restart;
1321 }
1322
1323 qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
1324 qla4_83xx_process_start_seq(ha);
1325
1326exit_restart:
1327 return ret_val;
1328}
1329
1330int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
1331{
1332 int ret_val = QLA_SUCCESS;
1333
1334 ret_val = qla4_83xx_restart(ha);
1335 if (ret_val == QLA_ERROR) {
1336 ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
1337 goto exit_start_fw;
1338 } else {
1339 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
1340 __func__));
1341 }
1342
1343 ret_val = qla4_83xx_check_cmd_peg_status(ha);
1344 if (ret_val == QLA_ERROR)
1345 ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
1346 __func__);
1347
1348exit_start_fw:
1349 return ret_val;
1350}
1351
1352/*----------------------Interrupt Related functions ---------------------*/
1353
1354void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
1355{
1356 uint32_t mb_int, ret;
1357
1358 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
1359 qla4_8xxx_mbx_intr_disable(ha);
1360
1361 ret = readl(&ha->qla4_83xx_reg->mbox_int);
1362 mb_int = ret & ~INT_ENABLE_FW_MB;
1363 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1364 writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1365}
1366
1367void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
1368{
1369 uint32_t mb_int;
1370
1371 qla4_8xxx_mbx_intr_enable(ha);
1372 mb_int = INT_ENABLE_FW_MB;
1373 writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1374 writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1375
1376 set_bit(AF_INTERRUPTS_ON, &ha->flags);
1377}
1378
1379void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
1380 int incount)
1381{
1382 int i;
1383
1384 /* Load all mailbox registers, except mailbox 0. */
1385 for (i = 1; i < incount; i++)
1386 writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);
1387
1388 writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);
1389
1390 /* Set Host Interrupt register to 1, to tell the firmware that
1391 * a mailbox command is pending. After reading the mailbox command,
1392 * the firmware clears the host interrupt register. */
1393 writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
1394}
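/*
 * Submission order above is deliberate: mailbox_in[1..incount-1] are loaded
 * first and mailbox_in[0] (the opcode) last, before HINT_MBX_INT_PENDING is
 * written to host_intr, presumably so the firmware never observes a
 * half-written command.
 */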
1395
1396void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
1397{
1398 int intr_status;
1399
1400 intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
1401 if (intr_status) {
1402 ha->mbox_status_count = outcount;
1403 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1404 }
1405}
1406
1407/**
1408 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
1409 * @ha: pointer to host adapter structure.
1410 **/
1411int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
1412{
1413 int rval;
1414 uint32_t dev_state;
1415
1416 ha->isp_ops->idc_lock(ha);
1417 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
1418
1419 if (ql4xdontresethba)
1420 qla4_83xx_set_idc_dontreset(ha);
1421
1422 if (dev_state == QLA8XXX_DEV_READY) {
1423 /* If IDC_CTRL DONTRESETHBA_BIT0 is set, don't do reset
1424 * recovery */
1425 if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
1426 ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
1427 __func__);
1428 rval = QLA_ERROR;
1429 goto exit_isp_reset;
1430 }
1431
1432 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
1433 __func__));
1434 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
1435 QLA8XXX_DEV_NEED_RESET);
1436
1437 } else {
1438 /* If device_state is NEED_RESET, go ahead with
1439 * Reset, irrespective of ql4xdontresethba. This is to allow a
1440 * non-reset-owner to force a Reset: the non-reset-owner sets
1441 * the IDC_CTRL BIT0 to prevent the Reset-owner from doing a
1442 * Reset and then forces a Reset by setting device_state to
1443 * NEED_RESET. */
1444 DEBUG2(ql4_printk(KERN_INFO, ha,
1445 "%s: HW state already set to NEED_RESET\n",
1446 __func__));
1447 }
1448
1449 /* For ISP8324, the Reset owner is NIC, iSCSI or FCoE, based on
1450 * priority and on which drivers are present. Unlike ISP8022, the
1451 * function setting NEED_RESET may not be the Reset owner. */
1452 if (qla4_83xx_can_perform_reset(ha))
1453 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
1454
1455 ha->isp_ops->idc_unlock(ha);
1456 rval = qla4_8xxx_device_state_handler(ha);
1457
1458 ha->isp_ops->idc_lock(ha);
1459 qla4_8xxx_clear_rst_ready(ha);
1460exit_isp_reset:
1461 ha->isp_ops->idc_unlock(ha);
1462
1463 if (rval == QLA_SUCCESS)
1464 clear_bit(AF_FW_RECOVERY, &ha->flags);
1465
1466 return rval;
1467}
1468
1469static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
1470{
1471 u32 val = 0, val1 = 0;
1472 int i, status = QLA_SUCCESS;
1473
1474 status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
1475 DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));
1476
1477 /* Port 0 Rx Buffer Pause Threshold Registers. */
1478 DEBUG2(ql4_printk(KERN_INFO, ha,
1479 "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1480 for (i = 0; i < 8; i++) {
1481 status = qla4_83xx_rd_reg_indirect(ha,
1482 QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
1483 DEBUG2(pr_info("0x%x ", val));
1484 }
1485
1486 DEBUG2(pr_info("\n"));
1487
1488 /* Port 1 Rx Buffer Pause Threshold Registers. */
1489 DEBUG2(ql4_printk(KERN_INFO, ha,
1490 "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
1491 for (i = 0; i < 8; i++) {
1492 status = qla4_83xx_rd_reg_indirect(ha,
1493 QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
1494 DEBUG2(pr_info("0x%x ", val));
1495 }
1496
1497 DEBUG2(pr_info("\n"));
1498
1499 /* Port 0 RxB Traffic Class Max Cell Registers. */
1500 DEBUG2(ql4_printk(KERN_INFO, ha,
1501 "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
1502 for (i = 0; i < 4; i++) {
1503 status = qla4_83xx_rd_reg_indirect(ha,
1504 QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
1505 DEBUG2(pr_info("0x%x ", val));
1506 }
1507
1508 DEBUG2(pr_info("\n"));
1509
1510 /* Port 1 RxB Traffic Class Max Cell Registers. */
1511 DEBUG2(ql4_printk(KERN_INFO, ha,
1512 "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
1513 for (i = 0; i < 4; i++) {
1514 status = qla4_83xx_rd_reg_indirect(ha,
1515 QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
1516 DEBUG2(pr_info("0x%x ", val));
1517 }
1518
1519 DEBUG2(pr_info("\n"));
1520
1521 /* Port 0 RxB Rx Traffic Class Stats. */
1522 DEBUG2(ql4_printk(KERN_INFO, ha,
1523 "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
1524 for (i = 7; i >= 0; i--) {
1525 status = qla4_83xx_rd_reg_indirect(ha,
1526 QLA83XX_PORT0_RXB_TC_STATS,
1527 &val);
1528 val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
1529 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
1530 (val | (i << 29)));
1531 status = qla4_83xx_rd_reg_indirect(ha,
1532 QLA83XX_PORT0_RXB_TC_STATS,
1533 &val);
1534 DEBUG2(pr_info("0x%x ", val));
1535 }
1536
1537 DEBUG2(pr_info("\n"));
1538
1539 /* Port 1 RxB Rx Traffic Class Stats. */
1540 DEBUG2(ql4_printk(KERN_INFO, ha,
1541 "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
1542 for (i = 7; i >= 0; i--) {
1543 status = qla4_83xx_rd_reg_indirect(ha,
1544 QLA83XX_PORT1_RXB_TC_STATS,
1545 &val);
1546 val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
1547 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
1548 (val | (i << 29)));
1549 status = qla4_83xx_rd_reg_indirect(ha,
1550 QLA83XX_PORT1_RXB_TC_STATS,
1551 &val);
1552 DEBUG2(pr_info("0x%x ", val));
1553 }
1554
1555 DEBUG2(pr_info("\n"));
1556
1557 status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1558 &val);
1559 status = qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1560 &val1);
1561
1562 DEBUG2(ql4_printk(KERN_INFO, ha,
1563 "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
1564 val, val1));
1565}
1566
1567static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1568{
1569 int i;
1570
1571 /* set SRE-Shim Control Register */
1572 qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
1573 QLA83XX_SET_PAUSE_VAL);
1574
1575 for (i = 0; i < 8; i++) {
1576 /* Port 0 Rx Buffer Pause Threshold Registers. */
1577 qla4_83xx_wr_reg_indirect(ha,
1578 QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
1579 QLA83XX_SET_PAUSE_VAL);
1580 /* Port 1 Rx Buffer Pause Threshold Registers. */
1581 qla4_83xx_wr_reg_indirect(ha,
1582 QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
1583 QLA83XX_SET_PAUSE_VAL);
1584 }
1585
1586 for (i = 0; i < 4; i++) {
1587 /* Port 0 RxB Traffic Class Max Cell Registers. */
1588 qla4_83xx_wr_reg_indirect(ha,
1589 QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
1590 QLA83XX_SET_TC_MAX_CELL_VAL);
1591 /* Port 1 RxB Traffic Class Max Cell Registers. */
1592 qla4_83xx_wr_reg_indirect(ha,
1593 QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
1594 QLA83XX_SET_TC_MAX_CELL_VAL);
1595 }
1596
1597 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
1598 QLA83XX_SET_PAUSE_VAL);
1599 qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
1600 QLA83XX_SET_PAUSE_VAL);
1601
1602 ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
1603}
1604
1605void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
1606{
1607 ha->isp_ops->idc_lock(ha);
1608 qla4_83xx_dump_pause_control_regs(ha);
1609 __qla4_83xx_disable_pause(ha);
1610 ha->isp_ops->idc_unlock(ha);
1611}
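
The ql4_83xx.c code above routes all ISP8324 mailbox traffic through new per-chip hooks. A minimal sketch of how a caller would drive that handshake, assuming only the isp_ops fields added in the ql4_def.h hunk below; the wait on AF_MBOX_COMMAND_DONE and all error handling are elided:

/* Hypothetical caller, not part of this patch: queue a mailbox command
 * through the per-ISP ops and reap the completion. */
static void example_issue_mbox(struct scsi_qla_host *ha,
			       uint32_t *mbx_cmd, int incount, int outcount)
{
	/* For ISP8324 this resolves to qla4_83xx_queue_mbox_cmd():
	 * mailboxes 1..incount-1 are loaded first, mailbox 0 last,
	 * then HINT_MBX_INT_PENDING is written to host_intr. */
	ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, incount);

	/* ... wait here for the ISR to set AF_MBOX_COMMAND_DONE ... */

	/* For ISP8324 this resolves to qla4_83xx_process_mbox_intr(),
	 * which records outcount and runs the interrupt service
	 * routine if risc_intr is asserted. */
	ha->isp_ops->process_mailbox_interrupt(ha, outcount);
}
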
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
new file mode 100644
index 00000000000..6a00f903f2a
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -0,0 +1,283 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2012 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#ifndef __QL483XX_H
9#define __QL483XX_H
10
11/* Indirectly Mapped Registers */
12#define QLA83XX_FLASH_SPI_STATUS 0x2808E010
13#define QLA83XX_FLASH_SPI_CONTROL 0x2808E014
14#define QLA83XX_FLASH_STATUS 0x42100004
15#define QLA83XX_FLASH_CONTROL 0x42110004
16#define QLA83XX_FLASH_ADDR 0x42110008
17#define QLA83XX_FLASH_WRDATA 0x4211000C
18#define QLA83XX_FLASH_RDDATA 0x42110018
19#define QLA83XX_FLASH_DIRECT_WINDOW 0x42110030
20#define QLA83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
21
22/* Directly Mapped Registers in 83xx register table */
23
24/* Flash access regs */
25#define QLA83XX_FLASH_LOCK 0x3850
26#define QLA83XX_FLASH_UNLOCK 0x3854
27#define QLA83XX_FLASH_LOCK_ID 0x3500
28
29/* Driver Lock regs */
30#define QLA83XX_DRV_LOCK 0x3868
31#define QLA83XX_DRV_UNLOCK 0x386C
32#define QLA83XX_DRV_LOCK_ID 0x3504
33#define QLA83XX_DRV_LOCKRECOVERY 0x379C
34
35/* IDC version */
36#define QLA83XX_IDC_VER_MAJ_VALUE 0x1
37#define QLA83XX_IDC_VER_MIN_VALUE 0x0
38
39/* IDC Registers : Driver Coexistence Defines */
40#define QLA83XX_CRB_IDC_VER_MAJOR 0x3780
41#define QLA83XX_CRB_IDC_VER_MINOR 0x3798
42#define QLA83XX_IDC_DRV_CTRL 0x3790
43#define QLA83XX_IDC_DRV_AUDIT 0x3794
44#define QLA83XX_SRE_SHIM_CONTROL 0x0D200284
45#define QLA83XX_PORT0_RXB_PAUSE_THRS 0x0B2003A4
46#define QLA83XX_PORT1_RXB_PAUSE_THRS 0x0B2013A4
47#define QLA83XX_PORT0_RXB_TC_MAX_CELL 0x0B200388
48#define QLA83XX_PORT1_RXB_TC_MAX_CELL 0x0B201388
49#define QLA83XX_PORT0_RXB_TC_STATS 0x0B20039C
50#define QLA83XX_PORT1_RXB_TC_STATS 0x0B20139C
51#define QLA83XX_PORT2_IFB_PAUSE_THRS 0x0B200704
52#define QLA83XX_PORT3_IFB_PAUSE_THRS 0x0B201704
53
54/* value to write to the pause threshold registers */
55#define QLA83XX_SET_PAUSE_VAL 0x0
56#define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF
57
58/* qla_83xx_reg_tbl registers */
59#define QLA83XX_PEG_HALT_STATUS1 0x34A8
60#define QLA83XX_PEG_HALT_STATUS2 0x34AC
61#define QLA83XX_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
62#define QLA83XX_FW_CAPABILITIES 0x3528
63#define QLA83XX_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
64#define QLA83XX_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
65#define QLA83XX_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
66#define QLA83XX_CRB_DRV_SCRATCH 0x3548
67#define QLA83XX_CRB_DEV_PART_INFO1 0x37E0
68#define QLA83XX_CRB_DEV_PART_INFO2 0x37E4
69
70#define QLA83XX_FW_VER_MAJOR 0x3550
71#define QLA83XX_FW_VER_MINOR 0x3554
72#define QLA83XX_FW_VER_SUB 0x3558
73#define QLA83XX_NPAR_STATE 0x359C
74#define QLA83XX_FW_IMAGE_VALID 0x35FC
75#define QLA83XX_CMDPEG_STATE 0x3650
76#define QLA83XX_ASIC_TEMP 0x37B4
77#define QLA83XX_FW_API 0x356C
78#define QLA83XX_DRV_OP_MODE 0x3570
79
80static const uint32_t qla4_83xx_reg_tbl[] = {
81 QLA83XX_PEG_HALT_STATUS1,
82 QLA83XX_PEG_HALT_STATUS2,
83 QLA83XX_PEG_ALIVE_COUNTER,
84 QLA83XX_CRB_DRV_ACTIVE,
85 QLA83XX_CRB_DEV_STATE,
86 QLA83XX_CRB_DRV_STATE,
87 QLA83XX_CRB_DRV_SCRATCH,
88 QLA83XX_CRB_DEV_PART_INFO1,
89 QLA83XX_CRB_IDC_VER_MAJOR,
90 QLA83XX_FW_VER_MAJOR,
91 QLA83XX_FW_VER_MINOR,
92 QLA83XX_FW_VER_SUB,
93 QLA83XX_CMDPEG_STATE,
94 QLA83XX_ASIC_TEMP,
95};
96
97#define QLA83XX_CRB_WIN_BASE 0x3800
98#define QLA83XX_CRB_WIN_FUNC(f) (QLA83XX_CRB_WIN_BASE+((f)*4))
99#define QLA83XX_SEM_LOCK_BASE 0x3840
100#define QLA83XX_SEM_UNLOCK_BASE 0x3844
101#define QLA83XX_SEM_LOCK_FUNC(f) (QLA83XX_SEM_LOCK_BASE+((f)*8))
102#define QLA83XX_SEM_UNLOCK_FUNC(f) (QLA83XX_SEM_UNLOCK_BASE+((f)*8))
103#define QLA83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
104#define QLA83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
105#define QLA83XX_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
106#define QLA83XX_LINK_SPEED_FACTOR 10
107
108/* FLASH API Defines */
109#define QLA83xx_FLASH_MAX_WAIT_USEC 100
110#define QLA83XX_FLASH_LOCK_TIMEOUT 10000
111#define QLA83XX_FLASH_SECTOR_SIZE 65536
112#define QLA83XX_DRV_LOCK_TIMEOUT 2000
113#define QLA83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
114#define QLA83XX_FLASH_WRITE_CMD 0xdacdacda
115#define QLA83XX_FLASH_BUFFER_WRITE_CMD 0xcadcadca
116#define QLA83XX_FLASH_READ_RETRY_COUNT 2000
117#define QLA83XX_FLASH_STATUS_READY 0x6
118#define QLA83XX_FLASH_BUFFER_WRITE_MIN 2
119#define QLA83XX_FLASH_BUFFER_WRITE_MAX 64
120#define QLA83XX_FLASH_STATUS_REG_POLL_DELAY 1
121#define QLA83XX_ERASE_MODE 1
122#define QLA83XX_WRITE_MODE 2
123#define QLA83XX_DWORD_WRITE_MODE 3
124
125#define QLA83XX_GLOBAL_RESET 0x38CC
126#define QLA83XX_WILDCARD 0x38F0
127#define QLA83XX_INFORMANT 0x38FC
128#define QLA83XX_HOST_MBX_CTRL 0x3038
129#define QLA83XX_FW_MBX_CTRL 0x303C
130#define QLA83XX_BOOTLOADER_ADDR 0x355C
131#define QLA83XX_BOOTLOADER_SIZE 0x3560
132#define QLA83XX_FW_IMAGE_ADDR 0x3564
133#define QLA83XX_MBX_INTR_ENABLE 0x1000
134#define QLA83XX_MBX_INTR_MASK 0x1200
135
136/* IDC Control Register bit defines */
137#define DONTRESET_BIT0 0x1
138#define GRACEFUL_RESET_BIT1 0x2
139
140#define QLA83XX_HALT_STATUS_INFORMATIONAL (0x1 << 29)
141#define QLA83XX_HALT_STATUS_FW_RESET (0x2 << 29)
142#define QLA83XX_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
143
144/* Firmware image definitions */
145#define QLA83XX_BOOTLOADER_FLASH_ADDR 0x10000
146#define QLA83XX_BOOT_FROM_FLASH 0
147
148#define QLA83XX_IDC_PARAM_ADDR 0x3e8020
149/* Reset template definitions */
150#define QLA83XX_MAX_RESET_SEQ_ENTRIES 16
151#define QLA83XX_RESTART_TEMPLATE_SIZE 0x2000
152#define QLA83XX_RESET_TEMPLATE_ADDR 0x4F0000
153#define QLA83XX_RESET_SEQ_VERSION 0x0101
154
155/* Reset template entry opcodes */
156#define OPCODE_NOP 0x0000
157#define OPCODE_WRITE_LIST 0x0001
158#define OPCODE_READ_WRITE_LIST 0x0002
159#define OPCODE_POLL_LIST 0x0004
160#define OPCODE_POLL_WRITE_LIST 0x0008
161#define OPCODE_READ_MODIFY_WRITE 0x0010
162#define OPCODE_SEQ_PAUSE 0x0020
163#define OPCODE_SEQ_END 0x0040
164#define OPCODE_TMPL_END 0x0080
165#define OPCODE_POLL_READ_LIST 0x0100
166
167/* Template Header */
168#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
169struct qla4_83xx_reset_template_hdr {
170 __le16 version;
171 __le16 signature;
172 __le16 size;
173 __le16 entries;
174 __le16 hdr_size;
175 __le16 checksum;
176 __le16 init_seq_offset;
177 __le16 start_seq_offset;
178} __packed;
179
180/* Common Entry Header. */
181struct qla4_83xx_reset_entry_hdr {
182 __le16 cmd;
183 __le16 size;
184 __le16 count;
185 __le16 delay;
186} __packed;
187
188/* Generic poll entry type. */
189struct qla4_83xx_poll {
190 __le32 test_mask;
191 __le32 test_value;
192} __packed;
193
194/* Read modify write entry type. */
195struct qla4_83xx_rmw {
196 __le32 test_mask;
197 __le32 xor_value;
198 __le32 or_value;
199 uint8_t shl;
200 uint8_t shr;
201 uint8_t index_a;
202 uint8_t rsvd;
203} __packed;
204
205/* Generic Entry Item with 2 DWords. */
206struct qla4_83xx_entry {
207 __le32 arg1;
208 __le32 arg2;
209} __packed;
210
211/* Generic Entry Item with 4 DWords.*/
212struct qla4_83xx_quad_entry {
213 __le32 dr_addr;
214 __le32 dr_value;
215 __le32 ar_addr;
216 __le32 ar_value;
217} __packed;
218
219struct qla4_83xx_reset_template {
220 int seq_index;
221 int seq_error;
222 int array_index;
223 uint32_t array[QLA83XX_MAX_RESET_SEQ_ENTRIES];
224 uint8_t *buff;
225 uint8_t *stop_offset;
226 uint8_t *start_offset;
227 uint8_t *init_offset;
228 struct qla4_83xx_reset_template_hdr *hdr;
229 uint8_t seq_end;
230 uint8_t template_end;
231};
232
233/* POLLRD Entry */
234struct qla83xx_minidump_entry_pollrd {
235 struct qla8xxx_minidump_entry_hdr h;
236 uint32_t select_addr;
237 uint32_t read_addr;
238 uint32_t select_value;
239 uint16_t select_value_stride;
240 uint16_t op_count;
241 uint32_t poll_wait;
242 uint32_t poll_mask;
243 uint32_t data_size;
244 uint32_t rsvd_1;
245};
246
247/* RDMUX2 Entry */
248struct qla83xx_minidump_entry_rdmux2 {
249 struct qla8xxx_minidump_entry_hdr h;
250 uint32_t select_addr_1;
251 uint32_t select_addr_2;
252 uint32_t select_value_1;
253 uint32_t select_value_2;
254 uint32_t op_count;
255 uint32_t select_value_mask;
256 uint32_t read_addr;
257 uint8_t select_value_stride;
258 uint8_t data_size;
259 uint8_t rsvd[2];
260};
261
262/* POLLRDMWR Entry */
263struct qla83xx_minidump_entry_pollrdmwr {
264 struct qla8xxx_minidump_entry_hdr h;
265 uint32_t addr_1;
266 uint32_t addr_2;
267 uint32_t value_1;
268 uint32_t value_2;
269 uint32_t poll_wait;
270 uint32_t poll_mask;
271 uint32_t modify_mask;
272 uint32_t data_size;
273};
274
275/* IDC additional information */
276struct qla4_83xx_idc_information {
277 uint32_t request_desc; /* IDC request descriptor */
278 uint32_t info1; /* IDC additional info */
279 uint32_t info2; /* IDC additional info */
280 uint32_t info3; /* IDC additional info */
281};
282
283#endif
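
The reset template that qla4_83xx_process_reset_template() walks is read from flash at QLA83XX_RESET_TEMPLATE_ADDR and is described by the header above. A sketch of the obvious sanity checks on that header, using only the constants defined in this file; the driver's actual validation, including the checksum pass, is outside this excerpt:

/* Illustrative only: reject a template whose signature or sequence
 * version does not match the constants defined above. */
static int example_validate_tmplt_hdr(struct qla4_83xx_reset_template_hdr *hdr)
{
	if (le16_to_cpu(hdr->signature) != RESET_TMPLT_HDR_SIGNATURE)
		return -EINVAL;
	if (le16_to_cpu(hdr->version) != QLA83XX_RESET_SEQ_VERSION)
		return -EINVAL;
	/* Checksum verification over hdr->size bytes would go here. */
	return 0;
}
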
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index c681b2a355e..76819b71ada 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -17,7 +17,7 @@ qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj, 17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
18 struct device, kobj))); 18 struct device, kobj)));
19 19
20 if (!is_qla8022(ha)) 20 if (is_qla40XX(ha))
21 return -EINVAL; 21 return -EINVAL;
22 22
23 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags)) 23 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
@@ -38,7 +38,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
38 long reading; 38 long reading;
39 int ret = 0; 39 int ret = 0;
40 40
41 if (!is_qla8022(ha)) 41 if (is_qla40XX(ha))
42 return -EINVAL; 42 return -EINVAL;
43 43
44 if (off != 0) 44 if (off != 0)
@@ -75,21 +75,21 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
75 break; 75 break;
76 case 2: 76 case 2:
77 /* Reset HBA */ 77 /* Reset HBA */
78 qla4_8xxx_idc_lock(ha); 78 ha->isp_ops->idc_lock(ha);
79 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 79 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
80 if (dev_state == QLA82XX_DEV_READY) { 80 if (dev_state == QLA8XXX_DEV_READY) {
81 ql4_printk(KERN_INFO, ha, 81 ql4_printk(KERN_INFO, ha,
82 "%s: Setting Need reset, reset_owner is 0x%x.\n", 82 "%s: Setting Need reset, reset_owner is 0x%x.\n",
83 __func__, ha->func_num); 83 __func__, ha->func_num);
84 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 84 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
85 QLA82XX_DEV_NEED_RESET); 85 QLA8XXX_DEV_NEED_RESET);
86 set_bit(AF_82XX_RST_OWNER, &ha->flags); 86 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
87 } else 87 } else
88 ql4_printk(KERN_INFO, ha, 88 ql4_printk(KERN_INFO, ha,
89 "%s: Reset not performed as device state is 0x%x\n", 89 "%s: Reset not performed as device state is 0x%x\n",
90 __func__, dev_state); 90 __func__, dev_state);
91 91
92 qla4_8xxx_idc_unlock(ha); 92 ha->isp_ops->idc_unlock(ha);
93 break; 93 break;
94 default: 94 default:
95 /* do nothing */ 95 /* do nothing */
@@ -150,7 +150,7 @@ qla4xxx_fw_version_show(struct device *dev,
150{ 150{
151 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 151 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
152 152
153 if (is_qla8022(ha)) 153 if (is_qla80XX(ha))
154 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 154 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
155 ha->firmware_version[0], 155 ha->firmware_version[0],
156 ha->firmware_version[1], 156 ha->firmware_version[1],
@@ -214,7 +214,7 @@ qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
214{ 214{
215 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 215 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
216 216
217 if (!is_qla8022(ha)) 217 if (is_qla40XX(ha))
218 return -ENOSYS; 218 return -ENOSYS;
219 219
220 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt); 220 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
@@ -226,7 +226,7 @@ qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
226{ 226{
227 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 227 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
228 228
229 if (!is_qla8022(ha)) 229 if (is_qla40XX(ha))
230 return -ENOSYS; 230 return -ENOSYS;
231 231
232 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num); 232 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
@@ -238,7 +238,7 @@ qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
238{ 238{
239 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); 239 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
240 240
241 if (!is_qla8022(ha)) 241 if (is_qla40XX(ha))
242 return -ENOSYS; 242 return -ENOSYS;
243 243
244 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt); 244 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
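
The sysfs checks above flip from !is_qla8022(ha) to is_qla40XX(ha) so that ISP8324 is rejected or accepted alongside ISP8022. is_qla40XX() itself is not shown in this excerpt; presumably it mirrors the family helpers added to ql4_def.h further down, along these lines:

/* Assumed shape of the 4xxx-family predicate (not shown in this diff). */
static inline int is_qla40XX(struct scsi_qla_host *ha)
{
	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010 ||
	       ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022 ||
	       ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
}
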
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 8d58ae27482..77b7c594010 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -37,7 +37,7 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
37 if (is_qla8022(ha)) { 37 if (is_qla8022(ha)) {
38 for (i = 1; i < MBOX_REG_COUNT; i++) 38 for (i = 1; i < MBOX_REG_COUNT; i++)
39 printk(KERN_INFO "mailbox[%d] = 0x%08X\n", 39 printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
40 i, readl(&ha->qla4_8xxx_reg->mailbox_in[i])); 40 i, readl(&ha->qla4_82xx_reg->mailbox_in[i]));
41 return; 41 return;
42 } 42 }
43 43
@@ -131,3 +131,31 @@ void qla4xxx_dump_registers(struct scsi_qla_host *ha)
131 &ha->reg->ctrl_status); 131 &ha->reg->ctrl_status);
132 } 132 }
133} 133}
134
135void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
136{
137 uint32_t halt_status1, halt_status2;
138
139 halt_status1 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
140 halt_status2 = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS2);
141
142 if (is_qla8022(ha)) {
143 ql4_printk(KERN_INFO, ha,
144 "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n"
145 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
146 " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
147 " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
148 " PEG_NET_4_PC: 0x%x\n", ha->host_no,
149 __func__, halt_status1, halt_status2,
150 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
151 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
152 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
153 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
154 qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
155 } else if (is_qla8032(ha)) {
156 ql4_printk(KERN_INFO, ha,
157 "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n"
158 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
159 ha->host_no, __func__, halt_status1, halt_status2);
160 }
161}
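
The new qla4_8xxx_dump_peg_reg() reads both halt-status words through the generic rd_direct path, so it works unchanged on ISP8022 and ISP8324. If a caller wanted to classify the failure, the severity bits defined in the new ql4_83xx.h above suggest a decode of this shape (hypothetical helper, not in the patch):

/* Map the QLA83XX_HALT_STATUS_* bits (bits 31:29 of halt_status1)
 * to a human-readable severity. */
static const char *example_halt_severity(uint32_t halt_status1)
{
	if (halt_status1 & QLA83XX_HALT_STATUS_UNRECOVERABLE)
		return "unrecoverable";
	if (halt_status1 & QLA83XX_HALT_STATUS_FW_RESET)
		return "firmware reset requested";
	if (halt_status1 & QLA83XX_HALT_STATUS_INFORMATIONAL)
		return "informational";
	return "no halt severity reported";
}
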
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index abd83602cdd..5b0afc18ef1 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7fdba7f1ffb..329d553eae9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -42,6 +42,7 @@
42#include "ql4_nx.h" 42#include "ql4_nx.h"
43#include "ql4_fw.h" 43#include "ql4_fw.h"
44#include "ql4_nvram.h" 44#include "ql4_nvram.h"
45#include "ql4_83xx.h"
45 46
46#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 47#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
47#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 48#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
@@ -59,6 +60,10 @@
59#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022 60#define PCI_DEVICE_ID_QLOGIC_ISP8022 0x8022
60#endif 61#endif
61 62
63#ifndef PCI_DEVICE_ID_QLOGIC_ISP8324
64#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
65#endif
66
62#define ISP4XXX_PCI_FN_1 0x1 67#define ISP4XXX_PCI_FN_1 0x1
63#define ISP4XXX_PCI_FN_2 0x3 68#define ISP4XXX_PCI_FN_2 0x3
64 69
@@ -388,8 +393,10 @@ struct isp_operations {
388 void (*disable_intrs) (struct scsi_qla_host *); 393 void (*disable_intrs) (struct scsi_qla_host *);
389 void (*enable_intrs) (struct scsi_qla_host *); 394 void (*enable_intrs) (struct scsi_qla_host *);
390 int (*start_firmware) (struct scsi_qla_host *); 395 int (*start_firmware) (struct scsi_qla_host *);
396 int (*restart_firmware) (struct scsi_qla_host *);
391 irqreturn_t (*intr_handler) (int , void *); 397 irqreturn_t (*intr_handler) (int , void *);
392 void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t); 398 void (*interrupt_service_routine) (struct scsi_qla_host *, uint32_t);
399 int (*need_reset) (struct scsi_qla_host *);
393 int (*reset_chip) (struct scsi_qla_host *); 400 int (*reset_chip) (struct scsi_qla_host *);
394 int (*reset_firmware) (struct scsi_qla_host *); 401 int (*reset_firmware) (struct scsi_qla_host *);
395 void (*queue_iocb) (struct scsi_qla_host *); 402 void (*queue_iocb) (struct scsi_qla_host *);
@@ -397,6 +404,15 @@ struct isp_operations {
397 uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *); 404 uint16_t (*rd_shdw_req_q_out) (struct scsi_qla_host *);
398 uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *); 405 uint16_t (*rd_shdw_rsp_q_in) (struct scsi_qla_host *);
399 int (*get_sys_info) (struct scsi_qla_host *); 406 int (*get_sys_info) (struct scsi_qla_host *);
407 uint32_t (*rd_reg_direct) (struct scsi_qla_host *, ulong);
408 void (*wr_reg_direct) (struct scsi_qla_host *, ulong, uint32_t);
409 int (*rd_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t *);
410 int (*wr_reg_indirect) (struct scsi_qla_host *, uint32_t, uint32_t);
411 int (*idc_lock) (struct scsi_qla_host *);
412 void (*idc_unlock) (struct scsi_qla_host *);
413 void (*rom_lock_recovery) (struct scsi_qla_host *);
414 void (*queue_mailbox_command) (struct scsi_qla_host *, uint32_t *, int);
415 void (*process_mailbox_interrupt) (struct scsi_qla_host *, int);
400}; 416};
401 417
402struct ql4_mdump_size_table { 418struct ql4_mdump_size_table {
@@ -497,8 +513,9 @@ struct scsi_qla_host {
497#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */ 513#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
498#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */ 514#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
499#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */ 515#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
500#define AF_82XX_RST_OWNER 25 /* 0x02000000 */ 516#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
501#define AF_82XX_DUMP_READING 26 /* 0x04000000 */ 517#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
518#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
502 519
503 unsigned long dpc_flags; 520 unsigned long dpc_flags;
504 521
@@ -514,7 +531,7 @@ struct scsi_qla_host {
514#define DPC_RESET_ACTIVE 20 /* 0x00040000 */ 531#define DPC_RESET_ACTIVE 20 /* 0x00040000 */
515#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/ 532#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
516#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/ 533#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
517 534#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
518 535
519 struct Scsi_Host *host; /* pointer to host data */ 536 struct Scsi_Host *host; /* pointer to host data */
520 uint32_t tot_ddbs; 537 uint32_t tot_ddbs;
@@ -647,7 +664,7 @@ struct scsi_qla_host {
647 uint8_t acb_version; 664 uint8_t acb_version;
648 665
649 /* qla82xx specific fields */ 666 /* qla82xx specific fields */
650 struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */ 667 struct device_reg_82xx __iomem *qla4_82xx_reg; /* Base I/O address */
651 unsigned long nx_pcibase; /* Base I/O address */ 668 unsigned long nx_pcibase; /* Base I/O address */
652 uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */ 669 uint8_t *nx_db_rd_ptr; /* Doorbell read pointer */
653 unsigned long nx_db_wr_ptr; /* Door bell write pointer */ 670 unsigned long nx_db_wr_ptr; /* Door bell write pointer */
@@ -733,6 +750,13 @@ struct scsi_qla_host {
733#define MAX_MRB 128 750#define MAX_MRB 128
734 struct mrb *active_mrb_array[MAX_MRB]; 751 struct mrb *active_mrb_array[MAX_MRB];
735 uint32_t mrb_index; 752 uint32_t mrb_index;
753
754 uint32_t *reg_tbl;
755 struct qla4_83xx_reset_template reset_tmplt;
756 struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
757 for ISP8324 */
758 uint32_t pf_bit;
759 struct qla4_83xx_idc_information idc_info;
736}; 760};
737 761
738struct ql4_task_data { 762struct ql4_task_data {
@@ -752,7 +776,7 @@ struct ql4_task_data {
752 776
753struct qla_endpoint { 777struct qla_endpoint {
754 struct Scsi_Host *host; 778 struct Scsi_Host *host;
755 struct sockaddr dst_addr; 779 struct sockaddr_storage dst_addr;
756}; 780};
757 781
758struct qla_conn { 782struct qla_conn {
@@ -795,13 +819,20 @@ static inline int is_qla8022(struct scsi_qla_host *ha)
795 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; 819 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
796} 820}
797 821
798/* Note: Currently AER/EEH is now supported only for 8022 cards 822static inline int is_qla8032(struct scsi_qla_host *ha)
799 * This function needs to be updated when AER/EEH is enabled 823{
800 * for other cards. 824 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
801 */ 825}
826
827static inline int is_qla80XX(struct scsi_qla_host *ha)
828{
829 return is_qla8022(ha) || is_qla8032(ha);
830}
831
802static inline int is_aer_supported(struct scsi_qla_host *ha) 832static inline int is_aer_supported(struct scsi_qla_host *ha)
803{ 833{
804 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; 834 return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) ||
835 (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324));
805} 836}
806 837
807static inline int adapter_up(struct scsi_qla_host *ha) 838static inline int adapter_up(struct scsi_qla_host *ha)
@@ -942,6 +973,20 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
942 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 973 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
943 974
944} 975}
976
977static inline int qla4_8xxx_rd_direct(struct scsi_qla_host *ha,
978 const uint32_t crb_reg)
979{
980 return ha->isp_ops->rd_reg_direct(ha, ha->reg_tbl[crb_reg]);
981}
982
983static inline void qla4_8xxx_wr_direct(struct scsi_qla_host *ha,
984 const uint32_t crb_reg,
985 const uint32_t value)
986{
987 ha->isp_ops->wr_reg_direct(ha, ha->reg_tbl[crb_reg], value);
988}
989
945/*---------------------------------------------------------------------------*/ 990/*---------------------------------------------------------------------------*/
946 991
947/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */ 992/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
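
The qla4_8xxx_rd_direct()/qla4_8xxx_wr_direct() inlines above are the key indirection of this series: a generic QLA8XXX_* index is translated to a chip-specific CRB address via ha->reg_tbl, then issued through the per-chip rd/wr hooks. The probe-time wiring is outside this excerpt, but it presumably amounts to:

/* Assumed probe-time setup (not shown in this diff): point reg_tbl at
 * the ISP8324 table from ql4_83xx.h. */
static void example_setup_reg_tbl(struct scsi_qla_host *ha)
{
	if (is_qla8032(ha))
		ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;

	/* After this, qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE)
	 * becomes rd_reg_direct(ha, QLA83XX_CRB_DEV_STATE) on 8324. */
}
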
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 7240948fb92..1c479502035 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -65,6 +65,40 @@ struct device_reg_82xx {
65#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */ 65#define ISRX_82XX_RISC_INT BIT_0 /* RISC interrupt. */
66}; 66};
67 67
68/* ISP 83xx I/O Register Set structure */
69struct device_reg_83xx {
70 __le32 mailbox_in[16]; /* 0x0000 */
71 __le32 reserve1[496]; /* 0x0040 */
72 __le32 mailbox_out[16]; /* 0x0800 */
73 __le32 reserve2[496];
74 __le32 mbox_int; /* 0x1000 */
75 __le32 reserve3[63];
76 __le32 req_q_out; /* 0x1100 */
77 __le32 reserve4[63];
78
79 __le32 rsp_q_in; /* 0x1200 */
80 __le32 reserve5[1919];
81
82 __le32 req_q_in; /* 0x3000 */
83 __le32 reserve6[3];
84 __le32 iocb_int_mask; /* 0x3010 */
85 __le32 reserve7[3];
86 __le32 rsp_q_out; /* 0x3020 */
87 __le32 reserve8[3];
88 __le32 anonymousbuff; /* 0x3030 */
89 __le32 mb_int_mask; /* 0x3034 */
90
91 __le32 host_intr; /* 0x3038 - Host Interrupt Register */
92 __le32 risc_intr; /* 0x303C - RISC Interrupt Register */
93 __le32 reserve9[544];
94 __le32 leg_int_ptr; /* 0x38C0 - Legacy Interrupt Pointer Register */
95 __le32 leg_int_trig; /* 0x38C4 - Legacy Interrupt Trigger Control */
96 __le32 leg_int_mask; /* 0x38C8 - Legacy Interrupt Mask Register */
97};
98
99#define INT_ENABLE_FW_MB (1 << 2)
100#define INT_MASK_FW_MB (1 << 2)
101
68/* remote register set (access via PCI memory read/write) */ 102/* remote register set (access via PCI memory read/write) */
69struct isp_reg { 103struct isp_reg {
70#define MBOX_REG_COUNT 8 104#define MBOX_REG_COUNT 8
@@ -356,6 +390,9 @@ struct qla_flt_region {
356#define LOGOUT_OPTION_CLOSE_SESSION 0x0002 390#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
357#define LOGOUT_OPTION_RELOGIN 0x0004 391#define LOGOUT_OPTION_RELOGIN 0x0004
358#define LOGOUT_OPTION_FREE_DDB 0x0008 392#define LOGOUT_OPTION_FREE_DDB 0x0008
393#define MBOX_CMD_SET_PARAM 0x0059
394#define SET_DRVR_VERSION 0x200
395#define MAX_DRVR_VER_LEN 24
359#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A 396#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
360#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060 397#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
361#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061 398#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
@@ -417,6 +454,10 @@ struct qla_flt_region {
417#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */ 454#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
418#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 455#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
419 456
457#define MBOX_CMD_IDC_ACK 0x0101
458#define MBOX_CMD_PORT_RESET 0x0120
459#define MBOX_CMD_SET_PORT_CONFIG 0x0122
460
420/* Mailbox status definitions */ 461/* Mailbox status definitions */
421#define MBOX_COMPLETION_STATUS 4 462#define MBOX_COMPLETION_STATUS 4
422#define MBOX_STS_BUSY 0x0007 463#define MBOX_STS_BUSY 0x0007
@@ -453,6 +494,8 @@ struct qla_flt_region {
453#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C 494#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
454#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D 495#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
455#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E 496#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
497#define MBOX_ASTS_IDC_COMPLETE 0x8100
498#define MBOX_ASTS_IDC_NOTIFY 0x8101
456#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 499#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
457#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 500#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
458 501
@@ -1195,9 +1238,12 @@ struct ql_iscsi_stats {
1195 uint8_t reserved2[264]; /* 0x0308 - 0x040F */ 1238 uint8_t reserved2[264]; /* 0x0308 - 0x040F */
1196}; 1239};
1197 1240
1198#define QLA82XX_DBG_STATE_ARRAY_LEN 16 1241#define QLA8XXX_DBG_STATE_ARRAY_LEN 16
1199#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 1242#define QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN 8
1200#define QLA82XX_DBG_RSVD_ARRAY_LEN 8 1243#define QLA8XXX_DBG_RSVD_ARRAY_LEN 8
1244#define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16
1245#define QLA83XX_SS_OCM_WNDREG_INDEX 3
1246#define QLA83XX_SS_PCI_INDEX 0
1201 1247
1202struct qla4_8xxx_minidump_template_hdr { 1248struct qla4_8xxx_minidump_template_hdr {
1203 uint32_t entry_type; 1249 uint32_t entry_type;
@@ -1214,8 +1260,9 @@ struct qla4_8xxx_minidump_template_hdr {
1214 uint32_t driver_info_word3; 1260 uint32_t driver_info_word3;
1215 uint32_t driver_info_word4; 1261 uint32_t driver_info_word4;
1216 1262
1217 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; 1263 uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
1218 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; 1264 uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
1265 uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
1219}; 1266};
1220 1267
1221#endif /* _QLA4X_FW_H */ 1268#endif /* _QLA4X_FW_H */
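
The offsets annotated in the new device_reg_83xx layout are load-bearing; the reserve arrays exist only to pad the structure out to them. A compile-time guard of the following shape would keep the padding honest (not part of the patch; assumes the usual kernel BUILD_BUG_ON/offsetof helpers):

/* Hypothetical layout check; offsets taken from the comments above. */
static inline void example_check_83xx_layout(void)
{
	BUILD_BUG_ON(offsetof(struct device_reg_83xx, mailbox_out) != 0x0800);
	BUILD_BUG_ON(offsetof(struct device_reg_83xx, mbox_int)    != 0x1000);
	BUILD_BUG_ON(offsetof(struct device_reg_83xx, req_q_in)    != 0x3000);
	BUILD_BUG_ON(offsetof(struct device_reg_83xx, host_intr)   != 0x3038);
	BUILD_BUG_ON(offsetof(struct device_reg_83xx, leg_int_ptr) != 0x38C0);
}
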
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b2525c4139..57a5a3cf577 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -109,28 +109,28 @@ uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
109void qla4_8xxx_pci_config(struct scsi_qla_host *); 109void qla4_8xxx_pci_config(struct scsi_qla_host *);
110int qla4_8xxx_iospace_config(struct scsi_qla_host *ha); 110int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
111int qla4_8xxx_load_risc(struct scsi_qla_host *); 111int qla4_8xxx_load_risc(struct scsi_qla_host *);
112irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id); 112irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id);
113void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha); 113void qla4_82xx_queue_iocb(struct scsi_qla_host *ha);
114void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha); 114void qla4_82xx_complete_iocb(struct scsi_qla_host *ha);
115 115
116int qla4_8xxx_crb_win_lock(struct scsi_qla_host *); 116int qla4_82xx_crb_win_lock(struct scsi_qla_host *);
117void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *); 117void qla4_82xx_crb_win_unlock(struct scsi_qla_host *);
118int qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *); 118int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *, ulong *);
119void qla4_8xxx_wr_32(struct scsi_qla_host *, ulong, u32); 119void qla4_82xx_wr_32(struct scsi_qla_host *, ulong, u32);
120int qla4_8xxx_rd_32(struct scsi_qla_host *, ulong); 120uint32_t qla4_82xx_rd_32(struct scsi_qla_host *, ulong);
121int qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int); 121int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *, u64, void *, int);
122int qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int); 122int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha, u64, void *, int);
123int qla4_8xxx_isp_reset(struct scsi_qla_host *ha); 123int qla4_82xx_isp_reset(struct scsi_qla_host *ha);
124void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha, 124void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
125 uint32_t intr_status); 125 uint32_t intr_status);
126uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha); 126uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
127uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha); 127uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
128int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha); 128int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha);
129void qla4_8xxx_watchdog(struct scsi_qla_host *ha); 129void qla4_8xxx_watchdog(struct scsi_qla_host *ha);
130int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha); 130int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha);
131int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha); 131int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
132void qla4_8xxx_enable_intrs(struct scsi_qla_host *ha); 132void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
133void qla4_8xxx_disable_intrs(struct scsi_qla_host *ha); 133void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
134int qla4_8xxx_enable_msix(struct scsi_qla_host *ha); 134int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
135void qla4_8xxx_disable_msix(struct scsi_qla_host *ha); 135void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
136irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id); 136irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
@@ -138,8 +138,8 @@ irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
138irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id); 138irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
139void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha); 139void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha);
140void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha); 140void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha);
141int qla4_8xxx_idc_lock(struct scsi_qla_host *ha); 141int qla4_82xx_idc_lock(struct scsi_qla_host *ha);
142void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha); 142void qla4_82xx_idc_unlock(struct scsi_qla_host *ha);
143int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); 143int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
144void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); 144void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
145void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); 145void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
@@ -203,6 +203,62 @@ int qla4xxx_req_template_size(struct scsi_qla_host *ha);
203void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha); 203void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
204void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha); 204void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
205void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha); 205void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
206int qla4_82xx_try_start_fw(struct scsi_qla_host *ha);
207int qla4_8xxx_need_reset(struct scsi_qla_host *ha);
208int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data);
209int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data);
210void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha);
211void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
212 int incount);
213void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
214void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
215 int incount);
216void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
217void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha);
218void qla4_83xx_disable_intrs(struct scsi_qla_host *ha);
219void qla4_83xx_enable_intrs(struct scsi_qla_host *ha);
220int qla4_83xx_start_firmware(struct scsi_qla_host *ha);
221irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id);
222void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
223 uint32_t intr_status);
224int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
225void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
226void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
227uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
228uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
229uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
230void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
231int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
232 uint32_t *data);
233int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
234 uint32_t data);
235int qla4_83xx_drv_lock(struct scsi_qla_host *ha);
236void qla4_83xx_drv_unlock(struct scsi_qla_host *ha);
237void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha);
238void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
239 int incount);
240void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount);
241void qla4_83xx_read_reset_template(struct scsi_qla_host *ha);
242void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha);
243int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha);
244int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
245 uint32_t flash_addr, uint8_t *p_data,
246 int u32_word_count);
247void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha);
248void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha);
249int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
250 uint8_t *p_data, int u32_word_count);
251void qla4_83xx_get_idc_param(struct scsi_qla_host *ha);
252void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
253void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
254int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
255void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
256int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
257int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
258int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
259int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
260int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
261void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
206 262
207extern int ql4xextended_error_logging; 263extern int ql4xextended_error_logging;
208extern int ql4xdontresethba; 264extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index ddd9472066c..1aca1b4f70b 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -102,11 +102,18 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
102 102
103 if (is_qla8022(ha)) { 103 if (is_qla8022(ha)) {
104 writel(0, 104 writel(0,
105 (unsigned long __iomem *)&ha->qla4_8xxx_reg->req_q_out); 105 (unsigned long __iomem *)&ha->qla4_82xx_reg->req_q_out);
106 writel(0, 106 writel(0,
107 (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_in); 107 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
108 writel(0, 108 writel(0,
109 (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_out); 109 (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
110 } else if (is_qla8032(ha)) {
111 writel(0,
112 (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
113 writel(0,
114 (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_in);
115 writel(0,
116 (unsigned long __iomem *)&ha->qla4_83xx_reg->rsp_q_out);
110 } else { 117 } else {
111 /* 118 /*
112 * Initialize DMA Shadow registers. The firmware is really 119 * Initialize DMA Shadow registers. The firmware is really
@@ -524,7 +531,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
524 /* For 82xx, stop the firmware before initializing, because if the 531 /* For 82xx, stop the firmware before initializing, because if the
525 * BIOS has previously initialized it, the driver's firmware 532 * BIOS has previously initialized it, the driver's firmware
526 * initialization will fail. */ 533 * initialization will fail. */
527 if (is_qla8022(ha)) 534 if (is_qla80XX(ha))
528 qla4_8xxx_stop_firmware(ha); 535 qla4_8xxx_stop_firmware(ha);
529 536
530 ql4_printk(KERN_INFO, ha, "Initializing firmware..\n"); 537 ql4_printk(KERN_INFO, ha, "Initializing firmware..\n");
@@ -537,7 +544,7 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
537 if (!qla4xxx_fw_ready(ha)) 544 if (!qla4xxx_fw_ready(ha))
538 return status; 545 return status;
539 546
540 if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags)) 547 if (is_qla80XX(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
541 qla4xxx_alloc_fw_dump(ha); 548 qla4xxx_alloc_fw_dump(ha);
542 549
543 return qla4xxx_get_firmware_status(ha); 550 return qla4xxx_get_firmware_status(ha);
@@ -946,9 +953,9 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
946 953
947 set_bit(AF_ONLINE, &ha->flags); 954 set_bit(AF_ONLINE, &ha->flags);
948exit_init_hba: 955exit_init_hba:
949 if (is_qla8022(ha) && (status == QLA_ERROR)) { 956 if (is_qla80XX(ha) && (status == QLA_ERROR)) {
950 /* Since interrupts are registered in start_firmware for 957 /* Since interrupts are registered in start_firmware for
951 * 82xx, release them here if initialize_adapter fails */ 958 * 80XX, release them here if initialize_adapter fails */
952 qla4xxx_free_irqs(ha); 959 qla4xxx_free_irqs(ha);
953 } 960 }
954 961
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 62f90bdec5d..6f4decd44c6 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 2a2022a6bb9..f48f37a281d 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -192,35 +192,47 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
192 } 192 }
193} 193}
194 194
195void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
196{
197 writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
198 readl(&ha->qla4_83xx_reg->req_q_in);
199}
200
201void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
202{
203 writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
204 readl(&ha->qla4_83xx_reg->rsp_q_out);
205}
206
195/** 207/**
196 * qla4_8xxx_queue_iocb - Tell ISP it's got new request(s) 208 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
197 * @ha: pointer to host adapter structure. 209 * @ha: pointer to host adapter structure.
198 * 210 *
199 * This routine notifies the ISP that one or more new request 211 * This routine notifies the ISP that one or more new request
200 * queue entries have been placed on the request queue. 212 * queue entries have been placed on the request queue.
201 **/ 213 **/
202void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha) 214void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
203{ 215{
204 uint32_t dbval = 0; 216 uint32_t dbval = 0;
205 217
206 dbval = 0x14 | (ha->func_num << 5); 218 dbval = 0x14 | (ha->func_num << 5);
207 dbval = dbval | (0 << 8) | (ha->request_in << 16); 219 dbval = dbval | (0 << 8) | (ha->request_in << 16);
208 220
209 qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in); 221 qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
210} 222}
211 223
212/** 224/**
213 * qla4_8xxx_complete_iocb - Tell ISP we're done with response(s) 225 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
214 * @ha: pointer to host adapter structure. 226 * @ha: pointer to host adapter structure.
215 * 227 *
216 * This routine notifies the ISP that one or more response/completion 228 * This routine notifies the ISP that one or more response/completion
217 * queue entries have been processed by the driver. 229 * queue entries have been processed by the driver.
218 * This also clears the interrupt. 230 * This also clears the interrupt.
219 **/ 231 **/
220void qla4_8xxx_complete_iocb(struct scsi_qla_host *ha) 232void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
221{ 233{
222 writel(ha->response_out, &ha->qla4_8xxx_reg->rsp_q_out); 234 writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
223 readl(&ha->qla4_8xxx_reg->rsp_q_out); 235 readl(&ha->qla4_82xx_reg->rsp_q_out);
224} 236}
225 237
226/** 238/**
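
Note the readl() immediately after each writel() in the new qla4_83xx_queue_iocb() and qla4_83xx_complete_iocb(): it forces the posted PCI write out to the adapter before the caller proceeds. The idiom in isolation:

/* Generic posted-write flush, the same pattern the two new helpers use. */
static void example_ring_doorbell(void __iomem *reg, u32 val)
{
	writel(val, reg);
	(void)readl(reg);	/* read-back flushes the posted write */
}
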
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index fc542a9bb10..15ea81465ce 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -126,7 +126,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
126 ql4_printk(KERN_WARNING, ha, "%s invalid status entry: " 126 ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
127 "handle=0x%0x, srb=%p\n", __func__, 127 "handle=0x%0x, srb=%p\n", __func__,
128 sts_entry->handle, srb); 128 sts_entry->handle, srb);
129 if (is_qla8022(ha)) 129 if (is_qla80XX(ha))
130 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 130 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
131 else 131 else
132 set_bit(DPC_RESET_HA, &ha->dpc_flags); 132 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -243,56 +243,72 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
243 243
244 scsi_set_resid(cmd, residual); 244 scsi_set_resid(cmd, residual);
245 245
246 /* 246 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
247 * If there is scsi_status, it takes precedense over 247
248 * underflow condition. 248 /* Both the firmware and target reported UNDERRUN:
249 */ 249 *
250 if (scsi_status != 0) { 250 * MID-LAYER UNDERFLOW case:
251 cmd->result = DID_OK << 16 | scsi_status; 251 * Some kernels do not properly detect midlayer
252 * underflow, so we manually check it and return
253 * ERROR if the minimum required data was not
254 * received.
255 *
256 * ALL OTHER cases:
257 * Fall thru to check scsi_status
258 */
259 if (!scsi_status && (scsi_bufflen(cmd) - residual) <
260 cmd->underflow) {
261 DEBUG2(ql4_printk(KERN_INFO, ha,
262 "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
263 ha->host_no,
264 cmd->device->channel,
265 cmd->device->id,
266 cmd->device->lun, __func__,
267 scsi_bufflen(cmd),
268 residual));
252 269
253 if (scsi_status != SCSI_CHECK_CONDITION) 270 cmd->result = DID_ERROR << 16;
254 break; 271 break;
272 }
273
274 } else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
275 scsi_status != SAM_STAT_BUSY) {
255 276
256 /* Copy Sense Data into sense buffer. */
257 qla4xxx_copy_sense(ha, sts_entry, srb);
258 } else {
259 /* 277 /*
260 * If RISC reports underrun and target does not 278 * The firmware reports UNDERRUN, but the target does
261 * report it then we must have a lost frame, so 279 * not report it:
262 * tell upper layer to retry it by reporting a 280 *
263 * bus busy. 281 * scsi_status | host_byte device_byte
282 * | (19:16) (7:0)
283 * ============= | ========= ===========
284 * TASK_SET_FULL | DID_OK scsi_status
285 * BUSY | DID_OK scsi_status
286 * ALL OTHERS | DID_ERROR scsi_status
287 *
288 * Note: If scsi_status is task set full or busy,
289 * then this else if would fall thru to check the
290 * scsi_status and return DID_OK.
264 */ 291 */
265 if ((sts_entry->iscsiFlags &
266 ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
267 cmd->result = DID_BUS_BUSY << 16;
268 } else if ((scsi_bufflen(cmd) - residual) <
269 cmd->underflow) {
270 /*
271 * Handle mid-layer underflow???
272 *
273 * For kernels less than 2.4, the driver must
274 * return an error if an underflow is detected.
275 * For kernels equal-to and above 2.4, the
276 * mid-layer will apparently handle the 289 * then this else if would fall thru to check the
277 * underflow by detecting the residual count --
278 * unfortunately, we do not see where this is
279 * actually being done. In the interim, we
280 * will return DID_ERROR.
281 */
282 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
283 "Mid-layer Data underrun1, "
284 "xferlen = 0x%x, "
285 "residual = 0x%x\n", ha->host_no,
286 cmd->device->channel,
287 cmd->device->id,
288 cmd->device->lun, __func__,
289 scsi_bufflen(cmd), residual));
290 292
291 cmd->result = DID_ERROR << 16; 293 DEBUG2(ql4_printk(KERN_INFO, ha,
292 } else { 294 "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
293 cmd->result = DID_OK << 16; 295 ha->host_no,
294 } 296 cmd->device->channel,
297 cmd->device->id,
298 cmd->device->lun, __func__,
299 residual,
300 scsi_bufflen(cmd)));
301
302 cmd->result = DID_ERROR << 16 | scsi_status;
303 goto check_scsi_status;
295 } 304 }
305
306 cmd->result = DID_OK << 16 | scsi_status;
307
308check_scsi_status:
309 if (scsi_status == SAM_STAT_CHECK_CONDITION)
310 qla4xxx_copy_sense(ha, sts_entry, srb);
311
296 break; 312 break;
297 313
298 case SCS_DEVICE_LOGGED_OUT: 314 case SCS_DEVICE_LOGGED_OUT:
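
The rewritten underrun handling above composes cmd->result exactly as the comment table describes. Condensed into one hypothetical helper covering only the firmware-reported-UNDERRUN path; the mid-layer underflow check and the sense-data copy are elided:

/* Called on the firmware-reported UNDERRUN path; tgt_underrun is
 * whether the target set ISCSI_FLAG_RESIDUAL_UNDER. */
static int example_underrun_result(uint8_t scsi_status, int tgt_underrun)
{
	if (!tgt_underrun &&
	    scsi_status != SAM_STAT_TASK_SET_FULL &&
	    scsi_status != SAM_STAT_BUSY)
		return DID_ERROR << 16 | scsi_status;	/* dropped frames */

	return DID_OK << 16 | scsi_status;
}
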
@@ -578,6 +594,14 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
578{ 594{
579 int i; 595 int i;
580 uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; 596 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
597 __le32 __iomem *mailbox_out;
598
599 if (is_qla8032(ha))
600 mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
601 else if (is_qla8022(ha))
602 mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
603 else
604 mailbox_out = &ha->reg->mailbox[0];
581 605
582 if ((mbox_status == MBOX_STS_BUSY) || 606 if ((mbox_status == MBOX_STS_BUSY) ||
583 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || 607 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -590,9 +614,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
590 * location and set mailbox command done flag 614 * location and set mailbox command done flag
591 */ 615 */
592 for (i = 0; i < ha->mbox_status_count; i++) 616 for (i = 0; i < ha->mbox_status_count; i++)
593 ha->mbox_status[i] = is_qla8022(ha) 617 ha->mbox_status[i] = readl(&mailbox_out[i]);
594 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
595 : readl(&ha->reg->mailbox[i]);
596 618
597 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); 619 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
598 620
@@ -601,9 +623,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
601 } 623 }
602 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 624 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
603 for (i = 0; i < MBOX_AEN_REG_COUNT; i++) 625 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
604 mbox_sts[i] = is_qla8022(ha) 626 mbox_sts[i] = readl(&mailbox_out[i]);
605 ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
606 : readl(&ha->reg->mailbox[i]);
607 627
608 /* Immediately process the AENs that don't require much work. 628 /* Immediately process the AENs that don't require much work.
609 * Only queue the database_changed AENs */ 629 * Only queue the database_changed AENs */
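Both loops above now read through a single mailbox_out pointer chosen once per chip, instead of re-testing is_qla8022() on every register. A sketch of that select-the-bank-once shape, with a fake host structure and plain arrays standing in for the three ioremapped register files:

#include <stdint.h>
#include <stdio.h>

enum chip { QLA40XX, QLA8022, QLA8032 };

struct fake_ha {
	enum chip chip;
	uint32_t mbox40[8];	/* stand-in for ha->reg->mailbox[] */
	uint32_t mbox82[8];	/* stand-in for qla4_82xx_reg->mailbox_out[] */
	uint32_t mbox83[8];	/* stand-in for qla4_83xx_reg->mailbox_out[] */
};

/* Pick the register bank once, then index it uniformly. */
static const uint32_t *mailbox_base(const struct fake_ha *ha)
{
	switch (ha->chip) {
	case QLA8032: return ha->mbox83;
	case QLA8022: return ha->mbox82;
	default:      return ha->mbox40;
	}
}

int main(void)
{
	struct fake_ha ha = { .chip = QLA8032, .mbox83 = { 0x8002 } };
	const uint32_t *mb = mailbox_base(&ha);

	printf("mbox_sts[0] = 0x%x\n", (unsigned)mb[0]);
	return 0;
}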
@@ -619,7 +639,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
619 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); 639 ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
620 qla4xxx_dump_registers(ha); 640 qla4xxx_dump_registers(ha);
621 641
622 if (ql4xdontresethba) { 642 if ((is_qla8022(ha) && ql4xdontresethba) ||
643 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
623 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", 644 DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
624 ha->host_no, __func__)); 645 ha->host_no, __func__));
625 } else { 646 } else {
@@ -635,7 +656,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
635 case MBOX_ASTS_DHCP_LEASE_EXPIRED: 656 case MBOX_ASTS_DHCP_LEASE_EXPIRED:
636 DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, " 657 DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
637 "Reset HA\n", ha->host_no, mbox_status)); 658 "Reset HA\n", ha->host_no, mbox_status));
638 if (is_qla8022(ha)) 659 if (is_qla80XX(ha))
639 set_bit(DPC_RESET_HA_FW_CONTEXT, 660 set_bit(DPC_RESET_HA_FW_CONTEXT,
640 &ha->dpc_flags); 661 &ha->dpc_flags);
641 else 662 else
@@ -700,7 +721,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
700 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); 721 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
701 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && 722 else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
702 (mbox_sts[2] == ACB_STATE_VALID)) { 723 (mbox_sts[2] == ACB_STATE_VALID)) {
703 if (is_qla8022(ha)) 724 if (is_qla80XX(ha))
704 set_bit(DPC_RESET_HA_FW_CONTEXT, 725 set_bit(DPC_RESET_HA_FW_CONTEXT,
705 &ha->dpc_flags); 726 &ha->dpc_flags);
706 else 727 else
@@ -785,6 +806,43 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
785 " removed\n", ha->host_no, mbox_sts[0])); 806 " removed\n", ha->host_no, mbox_sts[0]));
786 break; 807 break;
787 808
809 case MBOX_ASTS_IDC_NOTIFY:
810 {
811 uint32_t opcode;
812 if (is_qla8032(ha)) {
813 DEBUG2(ql4_printk(KERN_INFO, ha,
814 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
815 ha->host_no, mbox_sts[0],
816 mbox_sts[1], mbox_sts[2],
817 mbox_sts[3], mbox_sts[4]));
818 opcode = mbox_sts[1] >> 16;
819 if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
820 (opcode == MBOX_CMD_PORT_RESET)) {
821 set_bit(DPC_POST_IDC_ACK,
822 &ha->dpc_flags);
823 ha->idc_info.request_desc = mbox_sts[1];
824 ha->idc_info.info1 = mbox_sts[2];
825 ha->idc_info.info2 = mbox_sts[3];
826 ha->idc_info.info3 = mbox_sts[4];
827 qla4xxx_wake_dpc(ha);
828 }
829 }
830 break;
831 }
832
833 case MBOX_ASTS_IDC_COMPLETE:
834 if (is_qla8032(ha)) {
835 DEBUG2(ql4_printk(KERN_INFO, ha,
836 "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
837 ha->host_no, mbox_sts[0],
838 mbox_sts[1], mbox_sts[2],
839 mbox_sts[3], mbox_sts[4]));
840 DEBUG2(ql4_printk(KERN_INFO, ha,
841 "scsi:%ld: AEN %04x IDC Complete notification\n",
842 ha->host_no, mbox_sts[0]));
843 }
844 break;
845
788 default: 846 default:
789 DEBUG2(printk(KERN_WARNING 847 DEBUG2(printk(KERN_WARNING
790 "scsi%ld: AEN %04x UNKNOWN\n", 848 "scsi%ld: AEN %04x UNKNOWN\n",
@@ -799,14 +857,31 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
799 } 857 }
800} 858}
801 859
860void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
861 uint32_t intr_status)
862{
863 /* Process mailbox/asynch event interrupt.*/
864 if (intr_status) {
865 qla4xxx_isr_decode_mailbox(ha,
866 readl(&ha->qla4_83xx_reg->mailbox_out[0]));
867 /* clear the interrupt */
868 writel(0, &ha->qla4_83xx_reg->risc_intr);
869 } else {
870 qla4xxx_process_response_queue(ha);
871 }
872
873 /* clear the interrupt */
874 writel(0, &ha->qla4_83xx_reg->mb_int_mask);
875}
876
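The new 83xx ISR above is only ever reached through ha->isp_ops->interrupt_service_routine, which keeps qla4xxx_isr_decode_mailbox() and the common IRQ paths chip-agnostic. A sketch of that ops-table dispatch; the structure and field names here are assumed for illustration:

#include <stdint.h>
#include <stdio.h>

struct host;

struct isp_ops {	/* assumed shape of the driver's ops table */
	void (*interrupt_service_routine)(struct host *ha, uint32_t status);
};

struct host {
	const struct isp_ops *isp_ops;
};

static void isr_82xx(struct host *ha, uint32_t s)
{
	(void)ha;
	printf("82xx isr, status 0x%x\n", (unsigned)s);
}

static void isr_83xx(struct host *ha, uint32_t s)
{
	(void)ha;
	printf("83xx isr, status 0x%x\n", (unsigned)s);
}

static const struct isp_ops ops_82xx = { .interrupt_service_routine = isr_82xx };
static const struct isp_ops ops_83xx = { .interrupt_service_routine = isr_83xx };

int main(void)
{
	struct host ha = { .isp_ops = &ops_83xx };

	/* Common code never tests the chip type; probe picked the table. */
	ha.isp_ops->interrupt_service_routine(&ha, 0x1);
	ha.isp_ops = &ops_82xx;
	ha.isp_ops->interrupt_service_routine(&ha, 0x2);
	return 0;
}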
802/** 877/**
803 * qla4_8xxx_interrupt_service_routine - isr 878 * qla4_82xx_interrupt_service_routine - isr
804 * @ha: pointer to host adapter structure. 879 * @ha: pointer to host adapter structure.
805 * 880 *
806 * This is the main interrupt service routine. 881 * This is the main interrupt service routine.
807 * hardware_lock locked upon entry. runs in interrupt context. 882 * hardware_lock locked upon entry. runs in interrupt context.
808 **/ 883 **/
809void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha, 884void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
810 uint32_t intr_status) 885 uint32_t intr_status)
811{ 886{
812 /* Process response queue interrupt. */ 887 /* Process response queue interrupt. */
@@ -816,11 +891,11 @@ void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
816 /* Process mailbox/asynch event interrupt.*/ 891 /* Process mailbox/asynch event interrupt.*/
817 if (intr_status & HSRX_RISC_MB_INT) 892 if (intr_status & HSRX_RISC_MB_INT)
818 qla4xxx_isr_decode_mailbox(ha, 893 qla4xxx_isr_decode_mailbox(ha,
819 readl(&ha->qla4_8xxx_reg->mailbox_out[0])); 894 readl(&ha->qla4_82xx_reg->mailbox_out[0]));
820 895
821 /* clear the interrupt */ 896 /* clear the interrupt */
822 writel(0, &ha->qla4_8xxx_reg->host_int); 897 writel(0, &ha->qla4_82xx_reg->host_int);
823 readl(&ha->qla4_8xxx_reg->host_int); 898 readl(&ha->qla4_82xx_reg->host_int);
824} 899}
825 900
826/** 901/**
@@ -850,12 +925,12 @@ void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
850} 925}
851 926
852/** 927/**
853 * qla4_8xxx_spurious_interrupt - processes spurious interrupt 928 * qla4_82xx_spurious_interrupt - processes spurious interrupt
854 * @ha: pointer to host adapter structure. 929 * @ha: pointer to host adapter structure.
855 * @reqs_count: . 930 * @reqs_count: .
856 * 931 *
857 **/ 932 **/
858static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha, 933static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
859 uint8_t reqs_count) 934 uint8_t reqs_count)
860{ 935{
861 if (reqs_count) 936 if (reqs_count)
@@ -863,9 +938,9 @@ static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
863 938
864 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n")); 939 DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
865 if (is_qla8022(ha)) { 940 if (is_qla8022(ha)) {
866 writel(0, &ha->qla4_8xxx_reg->host_int); 941 writel(0, &ha->qla4_82xx_reg->host_int);
867 if (test_bit(AF_INTx_ENABLED, &ha->flags)) 942 if (test_bit(AF_INTx_ENABLED, &ha->flags))
868 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 943 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
869 0xfbff); 944 0xfbff);
870 } 945 }
871 ha->spurious_int_count++; 946 ha->spurious_int_count++;
@@ -968,11 +1043,11 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
968} 1043}
969 1044
970/** 1045/**
971 * qla4_8xxx_intr_handler - hardware interrupt handler. 1046 * qla4_82xx_intr_handler - hardware interrupt handler.
972 * @irq: Unused 1047 * @irq: Unused
973 * @dev_id: Pointer to host adapter structure 1048 * @dev_id: Pointer to host adapter structure
974 **/ 1049 **/
975irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id) 1050irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
976{ 1051{
977 struct scsi_qla_host *ha = dev_id; 1052 struct scsi_qla_host *ha = dev_id;
978 uint32_t intr_status; 1053 uint32_t intr_status;
@@ -984,11 +1059,11 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
984 return IRQ_HANDLED; 1059 return IRQ_HANDLED;
985 1060
986 ha->isr_count++; 1061 ha->isr_count++;
987 status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1062 status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
988 if (!(status & ha->nx_legacy_intr.int_vec_bit)) 1063 if (!(status & ha->nx_legacy_intr.int_vec_bit))
989 return IRQ_NONE; 1064 return IRQ_NONE;
990 1065
991 status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG); 1066 status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
992 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) { 1067 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
993 DEBUG2(ql4_printk(KERN_INFO, ha, 1068 DEBUG2(ql4_printk(KERN_INFO, ha,
994 "%s legacy Int not triggered\n", __func__)); 1069 "%s legacy Int not triggered\n", __func__));
@@ -996,30 +1071,30 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
996 } 1071 }
997 1072
998 /* clear the interrupt */ 1073 /* clear the interrupt */
999 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); 1074 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1000 1075
1001 /* read twice to ensure write is flushed */ 1076 /* read twice to ensure write is flushed */
1002 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1077 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1003 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1078 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1004 1079
1005 spin_lock_irqsave(&ha->hardware_lock, flags); 1080 spin_lock_irqsave(&ha->hardware_lock, flags);
1006 while (1) { 1081 while (1) {
1007 if (!(readl(&ha->qla4_8xxx_reg->host_int) & 1082 if (!(readl(&ha->qla4_82xx_reg->host_int) &
1008 ISRX_82XX_RISC_INT)) { 1083 ISRX_82XX_RISC_INT)) {
1009 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1084 qla4_82xx_spurious_interrupt(ha, reqs_count);
1010 break; 1085 break;
1011 } 1086 }
1012 intr_status = readl(&ha->qla4_8xxx_reg->host_status); 1087 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1013 if ((intr_status & 1088 if ((intr_status &
1014 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { 1089 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
1015 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1090 qla4_82xx_spurious_interrupt(ha, reqs_count);
1016 break; 1091 break;
1017 } 1092 }
1018 1093
1019 ha->isp_ops->interrupt_service_routine(ha, intr_status); 1094 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1020 1095
1021 /* Enable Interrupt */ 1096 /* Enable Interrupt */
1022 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 1097 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
1023 1098
1024 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) 1099 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1025 break; 1100 break;
@@ -1029,6 +1104,59 @@ irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
1029 return IRQ_HANDLED; 1104 return IRQ_HANDLED;
1030} 1105}
1031 1106
1107#define LEG_INT_PTR_B31 (1 << 31)
1108#define LEG_INT_PTR_B30 (1 << 30)
1109#define PF_BITS_MASK (0xF << 16)
1110
1111/**
1112 * qla4_83xx_intr_handler - hardware interrupt handler.
1113 * @irq: Unused
1114 * @dev_id: Pointer to host adapter structure
1115 **/
1116irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
1117{
1118 struct scsi_qla_host *ha = dev_id;
1119 uint32_t leg_int_ptr = 0;
1120 unsigned long flags = 0;
1121
1122 ha->isr_count++;
1123 leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1124
1125 /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
1126 if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
1127 ql4_printk(KERN_ERR, ha,
1128 "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
1129 __func__);
1130 return IRQ_NONE;
1131 }
1132
1133 /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
1134 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
1135 ql4_printk(KERN_ERR, ha,
1136 "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
1137 __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
1138 return IRQ_NONE;
1139 }
1140
1141 /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
1142 * Control register and poll till Legacy Interrupt Pointer register
1143 * bit30 is 0.
1144 */
1145 writel(0, &ha->qla4_83xx_reg->leg_int_trig);
1146 do {
1147 leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
1148 if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
1149 break;
1150 } while (leg_int_ptr & LEG_INT_PTR_B30);
1151
1152 spin_lock_irqsave(&ha->hardware_lock, flags);
1153 leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
1154 ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
1155 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1156
1157 return IRQ_HANDLED;
1158}
1159
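qla4_83xx_intr_handler() de-asserts the legacy interrupt by writing 0 to the trigger register, then polls leg_int_ptr until the hardware drops bit 30, breaking out early if the PF bits stop matching. A user-space model of that bounded poll, with the hardware's clearing of the bit simulated inline:

#include <stdint.h>
#include <stdio.h>

#define LEG_INT_PTR_B30	(1u << 30)
#define PF_BITS_MASK	(0xFu << 16)

/* Stand-in for the MMIO register; real hardware clears bit 30 after the
 * trigger write. Here we simulate that inside the loop. */
static volatile uint32_t leg_int_ptr = LEG_INT_PTR_B30 | (0x1u << 16);

static void deassert_legacy_intr(uint32_t pf_bit)
{
	uint32_t v;

	/* The real driver does: writel(0, &reg->leg_int_trig); */
	do {
		v = leg_int_ptr;
		if ((v & PF_BITS_MASK) != pf_bit)
			break;	/* no longer our function's interrupt */
		leg_int_ptr = v & ~LEG_INT_PTR_B30;	/* simulated HW */
	} while (v & LEG_INT_PTR_B30);
}

int main(void)
{
	deassert_legacy_intr(0x1u << 16);
	printf("leg_int_ptr now 0x%x\n", (unsigned)leg_int_ptr);
	return 0;
}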
1032irqreturn_t 1160irqreturn_t
1033qla4_8xxx_msi_handler(int irq, void *dev_id) 1161qla4_8xxx_msi_handler(int irq, void *dev_id)
1034{ 1162{
@@ -1043,15 +1171,46 @@ qla4_8xxx_msi_handler(int irq, void *dev_id)
1043 1171
1044 ha->isr_count++; 1172 ha->isr_count++;
1045 /* clear the interrupt */ 1173 /* clear the interrupt */
1046 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); 1174 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
1047 1175
1048 /* read twice to ensure write is flushed */ 1176 /* read twice to ensure write is flushed */
1049 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1177 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1050 qla4_8xxx_rd_32(ha, ISR_INT_VECTOR); 1178 qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
1051 1179
1052 return qla4_8xxx_default_intr_handler(irq, dev_id); 1180 return qla4_8xxx_default_intr_handler(irq, dev_id);
1053} 1181}
1054 1182
1183static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
1184{
1185 struct scsi_qla_host *ha = dev_id;
1186 unsigned long flags;
1187 uint32_t ival = 0;
1188
1189 spin_lock_irqsave(&ha->hardware_lock, flags);
1190
1191 ival = readl(&ha->qla4_83xx_reg->risc_intr);
1192 if (ival == 0) {
1193 ql4_printk(KERN_INFO, ha,
1194 "%s: It is a spurious mailbox interrupt!\n",
1195 __func__);
1196 ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1197 ival &= ~INT_MASK_FW_MB;
1198 writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1199 goto exit;
1200 }
1201
1202 qla4xxx_isr_decode_mailbox(ha,
1203 readl(&ha->qla4_83xx_reg->mailbox_out[0]));
1204 writel(0, &ha->qla4_83xx_reg->risc_intr);
1205 ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
1206 ival &= ~INT_MASK_FW_MB;
1207 writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
1208 ha->isr_count++;
1209exit:
1210 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1211 return IRQ_HANDLED;
1212}
1213
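Re-arming the firmware mailbox interrupt above is a read-modify-write that clears only INT_MASK_FW_MB and leaves the remaining mask bits untouched. A sketch of that RMW; the bit position used here is an assumption, not the driver's real INT_MASK_FW_MB value:

#include <stdint.h>
#include <stdio.h>

#define INT_MASK_FW_MB	(1u << 2)	/* assumed bit position */

static volatile uint32_t mb_int_mask = 0xffffffffu;

/* readl()/writel() read-modify-write: clear one bit, keep the rest. */
static void unmask_fw_mailbox(void)
{
	uint32_t ival = mb_int_mask;

	ival &= ~INT_MASK_FW_MB;
	mb_int_mask = ival;
}

int main(void)
{
	unmask_fw_mailbox();
	printf("mask now 0x%08x\n", (unsigned)mb_int_mask);
	return 0;
}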
1055/** 1214/**
1056 * qla4_8xxx_default_intr_handler - hardware interrupt handler. 1215 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
1057 * @irq: Unused 1216 * @irq: Unused
@@ -1068,29 +1227,32 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
1068 uint32_t intr_status; 1227 uint32_t intr_status;
1069 uint8_t reqs_count = 0; 1228 uint8_t reqs_count = 0;
1070 1229
1071 spin_lock_irqsave(&ha->hardware_lock, flags); 1230 if (is_qla8032(ha)) {
1072 while (1) { 1231 qla4_83xx_mailbox_intr_handler(irq, dev_id);
1073 if (!(readl(&ha->qla4_8xxx_reg->host_int) & 1232 } else {
1074 ISRX_82XX_RISC_INT)) { 1233 spin_lock_irqsave(&ha->hardware_lock, flags);
1075 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1234 while (1) {
1076 break; 1235 if (!(readl(&ha->qla4_82xx_reg->host_int) &
1077 } 1236 ISRX_82XX_RISC_INT)) {
1237 qla4_82xx_spurious_interrupt(ha, reqs_count);
1238 break;
1239 }
1078 1240
1079 intr_status = readl(&ha->qla4_8xxx_reg->host_status); 1241 intr_status = readl(&ha->qla4_82xx_reg->host_status);
1080 if ((intr_status & 1242 if ((intr_status &
1081 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) { 1243 (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
1082 qla4_8xxx_spurious_interrupt(ha, reqs_count); 1244 qla4_82xx_spurious_interrupt(ha, reqs_count);
1083 break; 1245 break;
1084 } 1246 }
1085 1247
1086 ha->isp_ops->interrupt_service_routine(ha, intr_status); 1248 ha->isp_ops->interrupt_service_routine(ha, intr_status);
1087 1249
1088 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR) 1250 if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
1089 break; 1251 break;
1252 }
1253 ha->isr_count++;
1254 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1090 } 1255 }
1091
1092 ha->isr_count++;
1093 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1094 return IRQ_HANDLED; 1256 return IRQ_HANDLED;
1095} 1257}
1096 1258
@@ -1099,13 +1261,25 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
1099{ 1261{
1100 struct scsi_qla_host *ha = dev_id; 1262 struct scsi_qla_host *ha = dev_id;
1101 unsigned long flags; 1263 unsigned long flags;
1264 uint32_t ival = 0;
1102 1265
1103 spin_lock_irqsave(&ha->hardware_lock, flags); 1266 spin_lock_irqsave(&ha->hardware_lock, flags);
1104 qla4xxx_process_response_queue(ha); 1267 if (is_qla8032(ha)) {
1105 writel(0, &ha->qla4_8xxx_reg->host_int); 1268 ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
1106 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1269 if (ival == 0) {
1107 1270 ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
1271 __func__);
1272 goto exit_msix_rsp_q;
1273 }
1274 qla4xxx_process_response_queue(ha);
1275 writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
1276 } else {
1277 qla4xxx_process_response_queue(ha);
1278 writel(0, &ha->qla4_82xx_reg->host_int);
1279 }
1108 ha->isr_count++; 1280 ha->isr_count++;
1281exit_msix_rsp_q:
1282 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1109 return IRQ_HANDLED; 1283 return IRQ_HANDLED;
1110} 1284}
1111 1285
@@ -1177,11 +1351,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1177{ 1351{
1178 int ret; 1352 int ret;
1179 1353
1180 if (!is_qla8022(ha)) 1354 if (is_qla40XX(ha))
1181 goto try_intx; 1355 goto try_intx;
1182 1356
1183 if (ql4xenablemsix == 2) 1357 if (ql4xenablemsix == 2) {
1358 /* Note: MSI Interrupts not supported for ISP8324 */
1359 if (is_qla8032(ha)) {
1360 ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n",
1361 __func__);
1362 goto try_intx;
1363 }
1184 goto try_msi; 1364 goto try_msi;
1365 }
1185 1366
1186 if (ql4xenablemsix == 0 || ql4xenablemsix != 1) 1367 if (ql4xenablemsix == 0 || ql4xenablemsix != 1)
1187 goto try_intx; 1368 goto try_intx;
@@ -1192,6 +1373,12 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
1192 DEBUG2(ql4_printk(KERN_INFO, ha, 1373 DEBUG2(ql4_printk(KERN_INFO, ha,
1193 "MSI-X: Enabled (0x%X).\n", ha->revision_id)); 1374 "MSI-X: Enabled (0x%X).\n", ha->revision_id));
1194 goto irq_attached; 1375 goto irq_attached;
1376 } else {
1377 if (is_qla8032(ha)) {
1378 ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n",
1379 __func__, ret);
1380 goto try_intx;
1381 }
1195 } 1382 }
1196 1383
1197 ql4_printk(KERN_WARNING, ha, 1384 ql4_printk(KERN_WARNING, ha,
@@ -1214,9 +1401,15 @@ try_msi:
1214 pci_disable_msi(ha->pdev); 1401 pci_disable_msi(ha->pdev);
1215 } 1402 }
1216 } 1403 }
1217 ql4_printk(KERN_WARNING, ha,
1218 "MSI: Falling back-to INTx mode -- %d.\n", ret);
1219 1404
1405 /*
1406 * Prevent interrupts from falling back to INTx mode in cases where
1407 * interrupts cannot get acquired through MSI-X or MSI mode.
1408 */
1409 if (is_qla8022(ha)) {
1410 ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
1411 goto irq_not_attached;
1412 }
1220try_intx: 1413try_intx:
1221 /* Trying INTx */ 1414 /* Trying INTx */
1222 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1415 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
@@ -1230,7 +1423,7 @@ try_intx:
1230 ql4_printk(KERN_WARNING, ha, 1423 ql4_printk(KERN_WARNING, ha,
1231 "INTx: Failed to reserve interrupt %d already in" 1424 "INTx: Failed to reserve interrupt %d already in"
1232 " use.\n", ha->pdev->irq); 1425 " use.\n", ha->pdev->irq);
1233 return ret; 1426 goto irq_not_attached;
1234 } 1427 }
1235 1428
1236irq_attached: 1429irq_attached:
@@ -1238,6 +1431,7 @@ irq_attached:
1238 ha->host->irq = ha->pdev->irq; 1431 ha->host->irq = ha->pdev->irq;
1239 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", 1432 ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
1240 __func__, ha->pdev->irq); 1433 __func__, ha->pdev->irq);
1434irq_not_attached:
1241 return ret; 1435 return ret;
1242} 1436}
1243 1437
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index cab8f665a41..3d41034191f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -9,7 +9,39 @@
9#include "ql4_glbl.h" 9#include "ql4_glbl.h"
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11#include "ql4_inline.h" 11#include "ql4_inline.h"
12#include "ql4_version.h"
12 13
14void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
15 int in_count)
16{
17 int i;
18
19 /* Load all mailbox registers, except mailbox 0. */
20 for (i = 1; i < in_count; i++)
21 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
22
23 /* Wakeup firmware */
24 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
25 readl(&ha->reg->mailbox[0]);
26 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
27 readl(&ha->reg->ctrl_status);
28}
29
30void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
31{
32 int intr_status;
33
34 intr_status = readl(&ha->reg->ctrl_status);
35 if (intr_status & INTR_PENDING) {
36 /*
37 * Service the interrupt.
38 * The ISR will save the mailbox status registers
39 * to a temporary storage location in the adapter structure.
40 */
41 ha->mbox_status_count = out_count;
42 ha->isp_ops->interrupt_service_routine(ha, intr_status);
43 }
44}
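qla4xxx_queue_mbox_cmd() deliberately writes mailboxes 1..in_count-1 before mailbox 0: the mailbox 0 write is the doorbell that wakes the firmware, and the readl() after it flushes the posted write. A sketch of that ordering with a volatile array standing in for the register file:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t mailbox[8];	/* stand-in for ha->reg->mailbox[] */

static void queue_mbox_cmd(const uint32_t *cmd, int in_count)
{
	int i;

	/* Payload registers first; mailbox 0 is the doorbell, so it is last. */
	for (i = 1; i < in_count; i++)
		mailbox[i] = cmd[i];

	mailbox[0] = cmd[0];
	(void)mailbox[0];	/* models the readl() that flushes the write */
}

int main(void)
{
	uint32_t cmd[3] = { 0x56, 0x1, 0x2 };	/* placeholder opcode/args */

	queue_mbox_cmd(cmd, 3);
	printf("mb0=0x%x mb1=0x%x mb2=0x%x\n", (unsigned)mailbox[0],
	       (unsigned)mailbox[1], (unsigned)mailbox[2]);
	return 0;
}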
13 45
14/** 46/**
15 * qla4xxx_mailbox_command - issues mailbox commands 47 * qla4xxx_mailbox_command - issues mailbox commands
@@ -30,7 +62,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
30 int status = QLA_ERROR; 62 int status = QLA_ERROR;
31 uint8_t i; 63 uint8_t i;
32 u_long wait_count; 64 u_long wait_count;
33 uint32_t intr_status;
34 unsigned long flags = 0; 65 unsigned long flags = 0;
35 uint32_t dev_state; 66 uint32_t dev_state;
36 67
@@ -77,7 +108,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
77 msleep(10); 108 msleep(10);
78 } 109 }
79 110
80 if (is_qla8022(ha)) { 111 if (is_qla80XX(ha)) {
81 if (test_bit(AF_FW_RECOVERY, &ha->flags)) { 112 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
82 DEBUG2(ql4_printk(KERN_WARNING, ha, 113 DEBUG2(ql4_printk(KERN_WARNING, ha,
83 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", 114 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
@@ -85,10 +116,10 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
85 goto mbox_exit; 116 goto mbox_exit;
86 } 117 }
87 /* Do not send any mbx cmd if h/w is in failed state*/ 118 /* Do not send any mbx cmd if h/w is in failed state*/
88 qla4_8xxx_idc_lock(ha); 119 ha->isp_ops->idc_lock(ha);
89 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 120 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
90 qla4_8xxx_idc_unlock(ha); 121 ha->isp_ops->idc_unlock(ha);
91 if (dev_state == QLA82XX_DEV_FAILED) { 122 if (dev_state == QLA8XXX_DEV_FAILED) {
92 ql4_printk(KERN_WARNING, ha, 123 ql4_printk(KERN_WARNING, ha,
93 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n", 124 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
94 ha->host_no, __func__); 125 ha->host_no, __func__);
@@ -102,30 +133,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
102 for (i = 0; i < outCount; i++) 133 for (i = 0; i < outCount; i++)
103 ha->mbox_status[i] = 0; 134 ha->mbox_status[i] = 0;
104 135
105 if (is_qla8022(ha)) { 136 /* Queue the mailbox command to the firmware */
106 /* Load all mailbox registers, except mailbox 0. */ 137 ha->isp_ops->queue_mailbox_command(ha, mbx_cmd, inCount);
107 DEBUG5(
108 printk("scsi%ld: %s: Cmd ", ha->host_no, __func__);
109 for (i = 0; i < inCount; i++)
110 printk("mb%d=%04x ", i, mbx_cmd[i]);
111 printk("\n"));
112
113 for (i = 1; i < inCount; i++)
114 writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]);
115 writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]);
116 readl(&ha->qla4_8xxx_reg->mailbox_in[0]);
117 writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint);
118 } else {
119 /* Load all mailbox registers, except mailbox 0. */
120 for (i = 1; i < inCount; i++)
121 writel(mbx_cmd[i], &ha->reg->mailbox[i]);
122
123 /* Wakeup firmware */
124 writel(mbx_cmd[0], &ha->reg->mailbox[0]);
125 readl(&ha->reg->mailbox[0]);
126 writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
127 readl(&ha->reg->ctrl_status);
128 }
129 138
130 spin_unlock_irqrestore(&ha->hardware_lock, flags); 139 spin_unlock_irqrestore(&ha->hardware_lock, flags);
131 140
@@ -167,37 +176,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
167 */ 176 */
168 177
169 spin_lock_irqsave(&ha->hardware_lock, flags); 178 spin_lock_irqsave(&ha->hardware_lock, flags);
170 if (is_qla8022(ha)) { 179 ha->isp_ops->process_mailbox_interrupt(ha, outCount);
171 intr_status =
172 readl(&ha->qla4_8xxx_reg->host_int);
173 if (intr_status & ISRX_82XX_RISC_INT) {
174 ha->mbox_status_count = outCount;
175 intr_status =
176 readl(&ha->qla4_8xxx_reg->host_status);
177 ha->isp_ops->interrupt_service_routine(
178 ha, intr_status);
179 if (test_bit(AF_INTERRUPTS_ON,
180 &ha->flags) &&
181 test_bit(AF_INTx_ENABLED,
182 &ha->flags))
183 qla4_8xxx_wr_32(ha,
184 ha->nx_legacy_intr.tgt_mask_reg,
185 0xfbff);
186 }
187 } else {
188 intr_status = readl(&ha->reg->ctrl_status);
189 if (intr_status & INTR_PENDING) {
190 /*
191 * Service the interrupt.
192 * The ISR will save the mailbox status
193 * registers to a temporary storage
194 * location in the adapter structure.
195 */
196 ha->mbox_status_count = outCount;
197 ha->isp_ops->interrupt_service_routine(
198 ha, intr_status);
199 }
200 }
201 spin_unlock_irqrestore(&ha->hardware_lock, flags); 180 spin_unlock_irqrestore(&ha->hardware_lock, flags);
202 msleep(10); 181 msleep(10);
203 } 182 }
@@ -205,7 +184,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
205 184
206 /* Check for mailbox timeout. */ 185 /* Check for mailbox timeout. */
207 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) { 186 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
208 if (is_qla8022(ha) && 187 if (is_qla80XX(ha) &&
209 test_bit(AF_FW_RECOVERY, &ha->flags)) { 188 test_bit(AF_FW_RECOVERY, &ha->flags)) {
210 DEBUG2(ql4_printk(KERN_INFO, ha, 189 DEBUG2(ql4_printk(KERN_INFO, ha,
211 "scsi%ld: %s: prematurely completing mbx cmd as " 190 "scsi%ld: %s: prematurely completing mbx cmd as "
@@ -222,9 +201,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
222 if (is_qla8022(ha)) { 201 if (is_qla8022(ha)) {
223 ql4_printk(KERN_INFO, ha, 202 ql4_printk(KERN_INFO, ha,
224 "disabling pause transmit on port 0 & 1.\n"); 203 "disabling pause transmit on port 0 & 1.\n");
225 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 204 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
226 CRB_NIU_XG_PAUSE_CTL_P0 | 205 CRB_NIU_XG_PAUSE_CTL_P0 |
227 CRB_NIU_XG_PAUSE_CTL_P1); 206 CRB_NIU_XG_PAUSE_CTL_P1);
207 } else if (is_qla8032(ha)) {
208 ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
209 __func__);
210 qla4_83xx_disable_pause(ha);
228 } 211 }
229 goto mbox_exit; 212 goto mbox_exit;
230 } 213 }
@@ -373,7 +356,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
373 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); 356 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
374 357
375 if (is_qla8022(ha)) 358 if (is_qla8022(ha))
376 qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0); 359 qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0);
377 360
378 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 361 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
379 mbox_cmd[1] = 0; 362 mbox_cmd[1] = 0;
@@ -566,7 +549,7 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
566 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 549 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
567 FWOPT_INITIATOR_MODE); 550 FWOPT_INITIATOR_MODE);
568 551
569 if (is_qla8022(ha)) 552 if (is_qla80XX(ha))
570 init_fw_cb->fw_options |= 553 init_fw_cb->fw_options |=
571 __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB); 554 __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
572 555
@@ -1695,7 +1678,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
1695 conn = cls_conn->dd_data; 1678 conn = cls_conn->dd_data;
1696 qla_conn = conn->dd_data; 1679 qla_conn = conn->dd_data;
1697 sess = conn->session; 1680 sess = conn->session;
1698 dst_addr = &qla_conn->qla_ep->dst_addr; 1681 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1699 1682
1700 if (dst_addr->sa_family == AF_INET6) 1683 if (dst_addr->sa_family == AF_INET6)
1701 options |= IPV6_DEFAULT_DDB_ENTRY; 1684 options |= IPV6_DEFAULT_DDB_ENTRY;
@@ -1953,3 +1936,72 @@ int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
1953 } 1936 }
1954 return status; 1937 return status;
1955} 1938}
1939
1940/**
1941 * qla4_8xxx_set_param - set driver version in firmware.
1942 * @ha: Pointer to host adapter structure.
1943 * @param: Parameter to set i.e driver version
1944 **/
1945int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
1946{
1947 uint32_t mbox_cmd[MBOX_REG_COUNT];
1948 uint32_t mbox_sts[MBOX_REG_COUNT];
1949 uint32_t status;
1950
1951 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1952 memset(&mbox_sts, 0, sizeof(mbox_sts));
1953
1954 mbox_cmd[0] = MBOX_CMD_SET_PARAM;
1955 if (param == SET_DRVR_VERSION) {
1956 mbox_cmd[1] = SET_DRVR_VERSION;
1957 strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
1958 MAX_DRVR_VER_LEN);
1959 } else {
1960 ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
1961 __func__, param);
1962 status = QLA_ERROR;
1963 goto exit_set_param;
1964 }
1965
1966 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, mbox_cmd,
1967 mbox_sts);
1968 if (status == QLA_ERROR)
1969 ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
1970 __func__, mbox_sts[0]);
1971
1972exit_set_param:
1973 return status;
1974}
1975
1976/**
1977 * qla4_83xx_post_idc_ack - post IDC ACK
1978 * @ha: Pointer to host adapter structure.
1979 *
1980 * Posts IDC ACK for IDC Request Notification AEN.
1981 **/
1982int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
1983{
1984 uint32_t mbox_cmd[MBOX_REG_COUNT];
1985 uint32_t mbox_sts[MBOX_REG_COUNT];
1986 int status;
1987
1988 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1989 memset(&mbox_sts, 0, sizeof(mbox_sts));
1990
1991 mbox_cmd[0] = MBOX_CMD_IDC_ACK;
1992 mbox_cmd[1] = ha->idc_info.request_desc;
1993 mbox_cmd[2] = ha->idc_info.info1;
1994 mbox_cmd[3] = ha->idc_info.info2;
1995 mbox_cmd[4] = ha->idc_info.info3;
1996
1997 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
1998 mbox_cmd, mbox_sts);
1999 if (status == QLA_ERROR)
2000 ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
2001 mbox_sts[0]);
2002 else
2003 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n",
2004 __func__));
2005
2006 return status;
2007}
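qla4_8xxx_set_param() packs the ASCII driver version straight into the 32-bit mailbox array with strncpy(), so each uint32_t carries four bytes of the string in host byte order. A standalone sketch of that packing; the opcode and selector values are placeholders:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MBOX_REG_COUNT	8

int main(void)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT] = { 0 };
	const char *ver = "5.03.00";	/* illustrative version string */
	int i;

	mbox_cmd[0] = 0x59;	/* placeholder for MBOX_CMD_SET_PARAM */
	mbox_cmd[1] = 1;	/* placeholder for SET_DRVR_VERSION */
	strncpy((char *)&mbox_cmd[2], ver,
		(MBOX_REG_COUNT - 2) * sizeof(uint32_t));

	for (i = 0; i < 5; i++)
		printf("mbox_cmd[%d] = 0x%08x\n", i, (unsigned)mbox_cmd[i]);
	return 0;
}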
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 7851f314ba9..325db1f2c09 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index 945cc328f57..dba0514d1c7 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 807bf76f1b6..499a92db1cf 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -10,6 +10,7 @@
10#include <linux/ratelimit.h> 10#include <linux/ratelimit.h>
11#include "ql4_def.h" 11#include "ql4_def.h"
12#include "ql4_glbl.h" 12#include "ql4_glbl.h"
13#include "ql4_inline.h"
13 14
14#include <asm-generic/io-64-nonatomic-lo-hi.h> 15#include <asm-generic/io-64-nonatomic-lo-hi.h>
15 16
@@ -27,7 +28,7 @@
27#define CRB_BLK(off) ((off >> 20) & 0x3f) 28#define CRB_BLK(off) ((off >> 20) & 0x3f)
28#define CRB_SUBBLK(off) ((off >> 16) & 0xf) 29#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
29#define CRB_WINDOW_2M (0x130060) 30#define CRB_WINDOW_2M (0x130060)
30#define CRB_HI(off) ((qla4_8xxx_crb_hub_agt[CRB_BLK(off)] << 20) | \ 31#define CRB_HI(off) ((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
31 ((off) & 0xf0000)) 32 ((off) & 0xf0000))
32#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) 33#define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL)
33#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) 34#define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL)
@@ -51,7 +52,7 @@ static int qla4_8xxx_crb_table_initialized;
51 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ 52 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
52 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) 53 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
53static void 54static void
54qla4_8xxx_crb_addr_transform_setup(void) 55qla4_82xx_crb_addr_transform_setup(void)
55{ 56{
56 qla4_8xxx_crb_addr_transform(XDMA); 57 qla4_8xxx_crb_addr_transform(XDMA);
57 qla4_8xxx_crb_addr_transform(TIMR); 58 qla4_8xxx_crb_addr_transform(TIMR);
@@ -268,7 +269,7 @@ static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
268/* 269/*
269 * top 12 bits of crb internal address (hub, agent) 270 * top 12 bits of crb internal address (hub, agent)
270 */ 271 */
271static unsigned qla4_8xxx_crb_hub_agt[64] = { 272static unsigned qla4_82xx_crb_hub_agt[64] = {
272 0, 273 0,
273 QLA82XX_HW_CRB_HUB_AGT_ADR_PS, 274 QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
274 QLA82XX_HW_CRB_HUB_AGT_ADR_MN, 275 QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -353,7 +354,7 @@ static char *qdev_state[] = {
353 * side effect: lock crb window 354 * side effect: lock crb window
354 */ 355 */
355static void 356static void
356qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off) 357qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
357{ 358{
358 u32 win_read; 359 u32 win_read;
359 360
@@ -373,96 +374,115 @@ qla4_8xxx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
373} 374}
374 375
375void 376void
376qla4_8xxx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data) 377qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
377{ 378{
378 unsigned long flags = 0; 379 unsigned long flags = 0;
379 int rv; 380 int rv;
380 381
381 rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off); 382 rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
382 383
383 BUG_ON(rv == -1); 384 BUG_ON(rv == -1);
384 385
385 if (rv == 1) { 386 if (rv == 1) {
386 write_lock_irqsave(&ha->hw_lock, flags); 387 write_lock_irqsave(&ha->hw_lock, flags);
387 qla4_8xxx_crb_win_lock(ha); 388 qla4_82xx_crb_win_lock(ha);
388 qla4_8xxx_pci_set_crbwindow_2M(ha, &off); 389 qla4_82xx_pci_set_crbwindow_2M(ha, &off);
389 } 390 }
390 391
391 writel(data, (void __iomem *)off); 392 writel(data, (void __iomem *)off);
392 393
393 if (rv == 1) { 394 if (rv == 1) {
394 qla4_8xxx_crb_win_unlock(ha); 395 qla4_82xx_crb_win_unlock(ha);
395 write_unlock_irqrestore(&ha->hw_lock, flags); 396 write_unlock_irqrestore(&ha->hw_lock, flags);
396 } 397 }
397} 398}
398 399
399int 400uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
400qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
401{ 401{
402 unsigned long flags = 0; 402 unsigned long flags = 0;
403 int rv; 403 int rv;
404 u32 data; 404 u32 data;
405 405
406 rv = qla4_8xxx_pci_get_crb_addr_2M(ha, &off); 406 rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);
407 407
408 BUG_ON(rv == -1); 408 BUG_ON(rv == -1);
409 409
410 if (rv == 1) { 410 if (rv == 1) {
411 write_lock_irqsave(&ha->hw_lock, flags); 411 write_lock_irqsave(&ha->hw_lock, flags);
412 qla4_8xxx_crb_win_lock(ha); 412 qla4_82xx_crb_win_lock(ha);
413 qla4_8xxx_pci_set_crbwindow_2M(ha, &off); 413 qla4_82xx_pci_set_crbwindow_2M(ha, &off);
414 } 414 }
415 data = readl((void __iomem *)off); 415 data = readl((void __iomem *)off);
416 416
417 if (rv == 1) { 417 if (rv == 1) {
418 qla4_8xxx_crb_win_unlock(ha); 418 qla4_82xx_crb_win_unlock(ha);
419 write_unlock_irqrestore(&ha->hw_lock, flags); 419 write_unlock_irqrestore(&ha->hw_lock, flags);
420 } 420 }
421 return data; 421 return data;
422} 422}
423 423
424/* Minidump related functions */ 424/* Minidump related functions */
425static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off, 425int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
426 u32 data, uint8_t flag)
427{ 426{
428 uint32_t win_read, off_value, rval = QLA_SUCCESS; 427 uint32_t win_read, off_value;
428 int rval = QLA_SUCCESS;
429 429
430 off_value = off & 0xFFFF0000; 430 off_value = off & 0xFFFF0000;
431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); 431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
432 432
433 /* Read back value to make sure write has gone through before trying 433 /*
434 * Read back value to make sure write has gone through before trying
434 * to use it. 435 * to use it.
435 */ 436 */
436 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); 437 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
437 if (win_read != off_value) { 438 if (win_read != off_value) {
438 DEBUG2(ql4_printk(KERN_INFO, ha, 439 DEBUG2(ql4_printk(KERN_INFO, ha,
439 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", 440 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
440 __func__, off_value, win_read, off)); 441 __func__, off_value, win_read, off));
441 return QLA_ERROR; 442 rval = QLA_ERROR;
443 } else {
444 off_value = off & 0x0000FFFF;
445 *data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
446 ha->nx_pcibase));
442 } 447 }
448 return rval;
449}
443 450
444 off_value = off & 0x0000FFFF; 451int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
452{
453 uint32_t win_read, off_value;
454 int rval = QLA_SUCCESS;
455
456 off_value = off & 0xFFFF0000;
457 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
445 458
446 if (flag) 459 /* Read back value to make sure write has gone through before trying
460 * to use it.
461 */
462 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
463 if (win_read != off_value) {
464 DEBUG2(ql4_printk(KERN_INFO, ha,
465 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
466 __func__, off_value, win_read, off));
467 rval = QLA_ERROR;
468 } else {
469 off_value = off & 0x0000FFFF;
447 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M + 470 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
448 ha->nx_pcibase)); 471 ha->nx_pcibase));
449 else 472 }
450 rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
451 ha->nx_pcibase));
452
453 return rval; 473 return rval;
454} 474}
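Both halves of the split helper program the 2M CRB window register, read it back to confirm the posted write actually landed, and only then perform the indirect access. A user-space model of that write-then-verify step, with plain memory and a simplified window decode:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t crb_window;	/* stand-in for CRB_WINDOW_2M */
static uint32_t fake_bar[0x10000];	/* stand-in for the mapped BAR */

static int md_rd_32(uint32_t off, uint32_t *data)
{
	uint32_t win = off & 0xffff0000u;

	crb_window = win;
	if (crb_window != win)	/* read back: did the posted write land? */
		return -1;

	*data = fake_bar[off & 0x0000ffffu];	/* indirect access */
	return 0;
}

int main(void)
{
	uint32_t v;

	fake_bar[0x42] = 0xdeadbeef;
	if (md_rd_32(0x00130042u, &v) == 0)
		printf("read 0x%x\n", (unsigned)v);
	return 0;
}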
455 475
456#define CRB_WIN_LOCK_TIMEOUT 100000000 476#define CRB_WIN_LOCK_TIMEOUT 100000000
457 477
458int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha) 478int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
459{ 479{
460 int i; 480 int i;
461 int done = 0, timeout = 0; 481 int done = 0, timeout = 0;
462 482
463 while (!done) { 483 while (!done) {
464 /* acquire semaphore3 from PCI HW block */ 484 /* acquire semaphore3 from PCI HW block */
465 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); 485 done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
466 if (done == 1) 486 if (done == 1)
467 break; 487 break;
468 if (timeout >= CRB_WIN_LOCK_TIMEOUT) 488 if (timeout >= CRB_WIN_LOCK_TIMEOUT)
@@ -478,32 +498,32 @@ int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
478 cpu_relax(); /*This is a nop instr on i386*/ 498 cpu_relax(); /*This is a nop instr on i386*/
479 } 499 }
480 } 500 }
481 qla4_8xxx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num); 501 qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
482 return 0; 502 return 0;
483} 503}
484 504
485void qla4_8xxx_crb_win_unlock(struct scsi_qla_host *ha) 505void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
486{ 506{
487 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); 507 qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
488} 508}
489 509
490#define IDC_LOCK_TIMEOUT 100000000 510#define IDC_LOCK_TIMEOUT 100000000
491 511
492/** 512/**
493 * qla4_8xxx_idc_lock - hw_lock 513 * qla4_82xx_idc_lock - hw_lock
494 * @ha: pointer to adapter structure 514 * @ha: pointer to adapter structure
495 * 515 *
496 * General purpose lock used to synchronize access to 516 * General purpose lock used to synchronize access to
497 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc. 517 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
498 **/ 518 **/
499int qla4_8xxx_idc_lock(struct scsi_qla_host *ha) 519int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
500{ 520{
501 int i; 521 int i;
502 int done = 0, timeout = 0; 522 int done = 0, timeout = 0;
503 523
504 while (!done) { 524 while (!done) {
505 /* acquire semaphore5 from PCI HW block */ 525 /* acquire semaphore5 from PCI HW block */
506 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); 526 done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
507 if (done == 1) 527 if (done == 1)
508 break; 528 break;
509 if (timeout >= IDC_LOCK_TIMEOUT) 529 if (timeout >= IDC_LOCK_TIMEOUT)
@@ -522,13 +542,13 @@ int qla4_8xxx_idc_lock(struct scsi_qla_host *ha)
522 return 0; 542 return 0;
523} 543}
524 544
525void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha) 545void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
526{ 546{
527 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); 547 qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
528} 548}
529 549
530int 550int
531qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off) 551qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
532{ 552{
533 struct crb_128M_2M_sub_block_map *m; 553 struct crb_128M_2M_sub_block_map *m;
534 554
@@ -562,44 +582,40 @@ qla4_8xxx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
562 return 1; 582 return 1;
563} 583}
564 584
565/* PCI Windowing for DDR regions. */
566#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
567 (((addr) <= (high)) && ((addr) >= (low)))
568
569/* 585/*
570* check memory access boundary. 586* check memory access boundary.
571* used by test agent. support ddr access only for now 587* used by test agent. support ddr access only for now
572*/ 588*/
573static unsigned long 589static unsigned long
574qla4_8xxx_pci_mem_bound_check(struct scsi_qla_host *ha, 590qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
575 unsigned long long addr, int size) 591 unsigned long long addr, int size)
576{ 592{
577 if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 593 if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
578 QLA82XX_ADDR_DDR_NET_MAX) || 594 QLA8XXX_ADDR_DDR_NET_MAX) ||
579 !QLA82XX_ADDR_IN_RANGE(addr + size - 1, 595 !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
580 QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || 596 QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
581 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) { 597 ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
582 return 0; 598 return 0;
583 } 599 }
584 return 1; 600 return 1;
585} 601}
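The renamed QLA8XXX_ADDR_IN_RANGE() check admits an access only when both its first and last byte fall inside the DDR window and the size is 1, 2, 4, or 8 bytes. A standalone version using illustrative bounds (the real limits are the QLA8XXX_ADDR_DDR_NET* constants):

#include <stdio.h>

#define ADDR_IN_RANGE(addr, low, high) \
	(((addr) <= (high)) && ((addr) >= (low)))

/* Illustrative DDR window; the driver uses QLA8XXX_ADDR_DDR_NET{,_MAX}. */
#define DDR_LOW		0x000000000ULL
#define DDR_HIGH	0x00fffffffULL

static int mem_bound_check(unsigned long long addr, int size)
{
	if (!ADDR_IN_RANGE(addr, DDR_LOW, DDR_HIGH) ||
	    !ADDR_IN_RANGE(addr + size - 1, DDR_LOW, DDR_HIGH) ||
	    (size != 1 && size != 2 && size != 4 && size != 8))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", mem_bound_check(0x00ffffff0ULL, 8));	/* 1: fits */
	printf("%d\n", mem_bound_check(0x00fffffffULL, 8));	/* 0: straddles */
	printf("%d\n", mem_bound_check(0x100ULL, 3));		/* 0: bad size */
	return 0;
}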
586 602
587static int qla4_8xxx_pci_set_window_warning_count; 603static int qla4_82xx_pci_set_window_warning_count;
588 604
589static unsigned long 605static unsigned long
590qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr) 606qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
591{ 607{
592 int window; 608 int window;
593 u32 win_read; 609 u32 win_read;
594 610
595 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 611 if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
596 QLA82XX_ADDR_DDR_NET_MAX)) { 612 QLA8XXX_ADDR_DDR_NET_MAX)) {
597 /* DDR network side */ 613 /* DDR network side */
598 window = MN_WIN(addr); 614 window = MN_WIN(addr);
599 ha->ddr_mn_window = window; 615 ha->ddr_mn_window = window;
600 qla4_8xxx_wr_32(ha, ha->mn_win_crb | 616 qla4_82xx_wr_32(ha, ha->mn_win_crb |
601 QLA82XX_PCI_CRBSPACE, window); 617 QLA82XX_PCI_CRBSPACE, window);
602 win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb | 618 win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
603 QLA82XX_PCI_CRBSPACE); 619 QLA82XX_PCI_CRBSPACE);
604 if ((win_read << 17) != window) { 620 if ((win_read << 17) != window) {
605 ql4_printk(KERN_WARNING, ha, 621 ql4_printk(KERN_WARNING, ha,
@@ -607,8 +623,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
607 __func__, window, win_read); 623 __func__, window, win_read);
608 } 624 }
609 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; 625 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
610 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, 626 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
611 QLA82XX_ADDR_OCM0_MAX)) { 627 QLA8XXX_ADDR_OCM0_MAX)) {
612 unsigned int temp1; 628 unsigned int temp1;
613 /* if bits 19:18&17:11 are on */ 629 /* if bits 19:18&17:11 are on */
614 if ((addr & 0x00ff800) == 0xff800) { 630 if ((addr & 0x00ff800) == 0xff800) {
@@ -618,9 +634,9 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
618 634
619 window = OCM_WIN(addr); 635 window = OCM_WIN(addr);
620 ha->ddr_mn_window = window; 636 ha->ddr_mn_window = window;
621 qla4_8xxx_wr_32(ha, ha->mn_win_crb | 637 qla4_82xx_wr_32(ha, ha->mn_win_crb |
622 QLA82XX_PCI_CRBSPACE, window); 638 QLA82XX_PCI_CRBSPACE, window);
623 win_read = qla4_8xxx_rd_32(ha, ha->mn_win_crb | 639 win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
624 QLA82XX_PCI_CRBSPACE); 640 QLA82XX_PCI_CRBSPACE);
625 temp1 = ((window & 0x1FF) << 7) | 641 temp1 = ((window & 0x1FF) << 7) |
626 ((window & 0x0FFFE0000) >> 17); 642 ((window & 0x0FFFE0000) >> 17);
@@ -630,14 +646,14 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
630 } 646 }
631 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; 647 addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
632 648
633 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, 649 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
634 QLA82XX_P3_ADDR_QDR_NET_MAX)) { 650 QLA82XX_P3_ADDR_QDR_NET_MAX)) {
635 /* QDR network side */ 651 /* QDR network side */
636 window = MS_WIN(addr); 652 window = MS_WIN(addr);
637 ha->qdr_sn_window = window; 653 ha->qdr_sn_window = window;
638 qla4_8xxx_wr_32(ha, ha->ms_win_crb | 654 qla4_82xx_wr_32(ha, ha->ms_win_crb |
639 QLA82XX_PCI_CRBSPACE, window); 655 QLA82XX_PCI_CRBSPACE, window);
640 win_read = qla4_8xxx_rd_32(ha, 656 win_read = qla4_82xx_rd_32(ha,
641 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); 657 ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
642 if (win_read != window) { 658 if (win_read != window) {
643 printk("%s: Written MSwin (0x%x) != Read " 659 printk("%s: Written MSwin (0x%x) != Read "
@@ -650,8 +666,8 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
650 * peg gdb frequently accesses memory that doesn't exist, 666 * peg gdb frequently accesses memory that doesn't exist,
651 * this limits the chit chat so debugging isn't slowed down. 667 * this limits the chit chat so debugging isn't slowed down.
652 */ 668 */
653 if ((qla4_8xxx_pci_set_window_warning_count++ < 8) || 669 if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
654 (qla4_8xxx_pci_set_window_warning_count%64 == 0)) { 670 (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
655 printk("%s: Warning:%s Unknown address range!\n", 671 printk("%s: Warning:%s Unknown address range!\n",
656 __func__, DRIVER_NAME); 672 __func__, DRIVER_NAME);
657 } 673 }
@@ -661,7 +677,7 @@ qla4_8xxx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
661} 677}
662 678
663/* check if address is in the same windows as the previous access */ 679/* check if address is in the same windows as the previous access */
664static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha, 680static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
665 unsigned long long addr) 681 unsigned long long addr)
666{ 682{
667 int window; 683 int window;
@@ -669,20 +685,20 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
669 685
670 qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; 686 qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
671 687
672 if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, 688 if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
673 QLA82XX_ADDR_DDR_NET_MAX)) { 689 QLA8XXX_ADDR_DDR_NET_MAX)) {
674 /* DDR network side */ 690 /* DDR network side */
675 BUG(); /* MN access can not come here */ 691 BUG(); /* MN access can not come here */
676 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, 692 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
677 QLA82XX_ADDR_OCM0_MAX)) { 693 QLA8XXX_ADDR_OCM0_MAX)) {
678 return 1; 694 return 1;
679 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1, 695 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
680 QLA82XX_ADDR_OCM1_MAX)) { 696 QLA8XXX_ADDR_OCM1_MAX)) {
681 return 1; 697 return 1;
682 } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, 698 } else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
683 qdr_max)) { 699 qdr_max)) {
684 /* QDR network side */ 700 /* QDR network side */
685 window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; 701 window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
686 if (ha->qdr_sn_window == window) 702 if (ha->qdr_sn_window == window)
687 return 1; 703 return 1;
688 } 704 }
@@ -690,7 +706,7 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
690 return 0; 706 return 0;
691} 707}
692 708
693static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha, 709static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
694 u64 off, void *data, int size) 710 u64 off, void *data, int size)
695{ 711{
696 unsigned long flags; 712 unsigned long flags;
@@ -707,9 +723,9 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
707 * If attempting to access unknown address or straddle hw windows, 723 * If attempting to access unknown address or straddle hw windows,
708 * do not access. 724 * do not access.
709 */ 725 */
710 start = qla4_8xxx_pci_set_window(ha, off); 726 start = qla4_82xx_pci_set_window(ha, off);
711 if ((start == -1UL) || 727 if ((start == -1UL) ||
712 (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) { 728 (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
713 write_unlock_irqrestore(&ha->hw_lock, flags); 729 write_unlock_irqrestore(&ha->hw_lock, flags);
714 printk(KERN_ERR"%s out of bound pci memory access. " 730 printk(KERN_ERR"%s out of bound pci memory access. "
715 "offset is 0x%llx\n", DRIVER_NAME, off); 731 "offset is 0x%llx\n", DRIVER_NAME, off);
@@ -763,7 +779,7 @@ static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
763} 779}
764 780
765static int 781static int
766qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off, 782qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
767 void *data, int size) 783 void *data, int size)
768{ 784{
769 unsigned long flags; 785 unsigned long flags;
@@ -780,9 +796,9 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
780 * If attempting to access unknown address or straddle hw windows, 796 * If attempting to access unknown address or straddle hw windows,
781 * do not access. 797 * do not access.
782 */ 798 */
783 start = qla4_8xxx_pci_set_window(ha, off); 799 start = qla4_82xx_pci_set_window(ha, off);
784 if ((start == -1UL) || 800 if ((start == -1UL) ||
785 (qla4_8xxx_pci_is_same_window(ha, off + size - 1) == 0)) { 801 (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
786 write_unlock_irqrestore(&ha->hw_lock, flags); 802 write_unlock_irqrestore(&ha->hw_lock, flags);
787 printk(KERN_ERR"%s out of bound pci memory access. " 803 printk(KERN_ERR"%s out of bound pci memory access. "
788 "offset is 0x%llx\n", DRIVER_NAME, off); 804 "offset is 0x%llx\n", DRIVER_NAME, off);
@@ -835,13 +851,13 @@ qla4_8xxx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
835#define MTU_FUDGE_FACTOR 100 851#define MTU_FUDGE_FACTOR 100
836 852
837static unsigned long 853static unsigned long
838qla4_8xxx_decode_crb_addr(unsigned long addr) 854qla4_82xx_decode_crb_addr(unsigned long addr)
839{ 855{
840 int i; 856 int i;
841 unsigned long base_addr, offset, pci_base; 857 unsigned long base_addr, offset, pci_base;
842 858
843 if (!qla4_8xxx_crb_table_initialized) 859 if (!qla4_8xxx_crb_table_initialized)
844 qla4_8xxx_crb_addr_transform_setup(); 860 qla4_82xx_crb_addr_transform_setup();
845 861
846 pci_base = ADDR_ERROR; 862 pci_base = ADDR_ERROR;
847 base_addr = addr & 0xfff00000; 863 base_addr = addr & 0xfff00000;
@@ -860,10 +876,10 @@ qla4_8xxx_decode_crb_addr(unsigned long addr)
860} 876}
861 877
862static long rom_max_timeout = 100; 878static long rom_max_timeout = 100;
863static long qla4_8xxx_rom_lock_timeout = 100; 879static long qla4_82xx_rom_lock_timeout = 100;
864 880
865static int 881static int
866qla4_8xxx_rom_lock(struct scsi_qla_host *ha) 882qla4_82xx_rom_lock(struct scsi_qla_host *ha)
867{ 883{
868 int i; 884 int i;
869 int done = 0, timeout = 0; 885 int done = 0, timeout = 0;
@@ -871,10 +887,10 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
871 while (!done) { 887 while (!done) {
872 /* acquire semaphore2 from PCI HW block */ 888 /* acquire semaphore2 from PCI HW block */
873 889
874 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); 890 done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
875 if (done == 1) 891 if (done == 1)
876 break; 892 break;
877 if (timeout >= qla4_8xxx_rom_lock_timeout) 893 if (timeout >= qla4_82xx_rom_lock_timeout)
878 return -1; 894 return -1;
879 895
880 timeout++; 896 timeout++;
@@ -887,24 +903,24 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
887 cpu_relax(); /*This a nop instr on i386*/ 903 cpu_relax(); /*This a nop instr on i386*/
888 } 904 }
889 } 905 }
890 qla4_8xxx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); 906 qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
891 return 0; 907 return 0;
892} 908}
893 909
894static void 910static void
895qla4_8xxx_rom_unlock(struct scsi_qla_host *ha) 911qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
896{ 912{
897 qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); 913 qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
898} 914}
899 915
900static int 916static int
901qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha) 917qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
902{ 918{
903 long timeout = 0; 919 long timeout = 0;
904 long done = 0 ; 920 long done = 0 ;
905 921
906 while (done == 0) { 922 while (done == 0) {
907 done = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS); 923 done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
908 done &= 2; 924 done &= 2;
909 timeout++; 925 timeout++;
910 if (timeout >= rom_max_timeout) { 926 if (timeout >= rom_max_timeout) {
@@ -917,40 +933,41 @@ qla4_8xxx_wait_rom_done(struct scsi_qla_host *ha)
917} 933}
918 934
919static int 935static int
920qla4_8xxx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) 936qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
921{ 937{
922 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr); 938 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
923 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 939 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
924 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3); 940 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
925 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb); 941 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
926 if (qla4_8xxx_wait_rom_done(ha)) { 942 if (qla4_82xx_wait_rom_done(ha)) {
927 printk("%s: Error waiting for rom done\n", DRIVER_NAME); 943 printk("%s: Error waiting for rom done\n", DRIVER_NAME);
928 return -1; 944 return -1;
929 } 945 }
930 /* reset abyte_cnt and dummy_byte_cnt */ 946 /* reset abyte_cnt and dummy_byte_cnt */
931 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 947 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
932 udelay(10); 948 udelay(10);
933 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0); 949 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
934 950
935 *valp = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA); 951 *valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
936 return 0; 952 return 0;
937} 953}
938 954
939static int 955static int
940qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp) 956qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
941{ 957{
942 int ret, loops = 0; 958 int ret, loops = 0;
943 959
944 while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) { 960 while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
945 udelay(100); 961 udelay(100);
946 loops++; 962 loops++;
947 } 963 }
948 if (loops >= 50000) { 964 if (loops >= 50000) {
949 printk("%s: qla4_8xxx_rom_lock failed\n", DRIVER_NAME); 965 ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
966 DRIVER_NAME);
950 return -1; 967 return -1;
951 } 968 }
952 ret = qla4_8xxx_do_rom_fast_read(ha, addr, valp); 969 ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
953 qla4_8xxx_rom_unlock(ha); 970 qla4_82xx_rom_unlock(ha);
954 return ret; 971 return ret;
955} 972}
956 973
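The read path above is a textbook hardware-semaphore pattern: spin on the PCIe SEM2 lock with a bounded retry budget, do the ROM access while holding it, then always release. A minimal sketch of that shape, assuming hypothetical acquire_sem2()/do_rom_read()/release_sem2() helpers in place of the driver's real accessors:

	static int locked_rom_read(struct scsi_qla_host *ha, int addr, int *valp)
	{
		int ret, loops = 0;

		/* Bounded spin: 50000 tries * 100 us is roughly a 5 s budget. */
		while (acquire_sem2(ha) != 0 && loops < 50000) {
			udelay(100);
			loops++;
		}
		if (loops >= 50000)
			return -1;		/* holder never released the lock */

		ret = do_rom_read(ha, addr, valp);	/* access under the lock */
		release_sem2(ha);			/* always drop the semaphore */
		return ret;
	}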
@@ -959,7 +976,7 @@ qla4_8xxx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
959 * to put the ISP into operational state 976 * to put the ISP into operational state
960 **/ 977 **/
961static int 978static int
962qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) 979qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
963{ 980{
964 int addr, val; 981 int addr, val;
964 int i; 981 int i;
@@ -973,68 +990,68 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
973 }; 990 };
974 991
975 /* Halt all the individual PEGs and other blocks of the ISP */ 992 /* Halt all the individual PEGs and other blocks of the ISP */
976 qla4_8xxx_rom_lock(ha); 993 qla4_82xx_rom_lock(ha);
977 994
978 /* disable all I2Q */ 995 /* disable all I2Q */
979 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); 996 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
980 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); 997 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
981 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); 998 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
982 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); 999 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
983 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); 1000 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
984 qla4_8xxx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); 1001 qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);
985 1002
986 /* disable all niu interrupts */ 1003 /* disable all niu interrupts */
987 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); 1004 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
988 /* disable xge rx/tx */ 1005 /* disable xge rx/tx */
989 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); 1006 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
990 /* disable xg1 rx/tx */ 1007 /* disable xg1 rx/tx */
991 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); 1008 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
992 /* disable sideband mac */ 1009 /* disable sideband mac */
993 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); 1010 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
994 /* disable ap0 mac */ 1011 /* disable ap0 mac */
995 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); 1012 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
996 /* disable ap1 mac */ 1013 /* disable ap1 mac */
997 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); 1014 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);
998 1015
999 /* halt sre */ 1016 /* halt sre */
1000 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); 1017 val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
1001 qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); 1018 qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));
1002 1019
1003 /* halt epg */ 1020 /* halt epg */
1004 qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); 1021 qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);
1005 1022
1006 /* halt timers */ 1023 /* halt timers */
1007 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); 1024 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
1008 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); 1025 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
1009 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); 1026 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
1010 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); 1027 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
1011 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); 1028 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
1012 qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); 1029 qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);
1013 1030
1014 /* halt pegs */ 1031 /* halt pegs */
1015 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); 1032 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
1016 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); 1033 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
1017 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); 1034 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
1018 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); 1035 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
1019 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); 1036 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
1020 msleep(5); 1037 msleep(5);
1021 1038
1022 /* big hammer */ 1039 /* big hammer */
1023 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 1040 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1024 /* don't reset CAM block on reset */ 1041 /* don't reset CAM block on reset */
1025 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); 1042 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
1026 else 1043 else
1027 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 1044 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
1028 1045
1029 qla4_8xxx_rom_unlock(ha); 1046 qla4_82xx_rom_unlock(ha);
1030 1047
1031 /* Read the signature value from the flash. 1048 /* Read the signature value from the flash.
1032 * Offset 0: Contains signature (0xcafecafe) 1049 * Offset 0: Contains signature (0xcafecafe)
1033 * Offset 4: Offset and number of addr/value pairs 1050 * Offset 4: Offset and number of addr/value pairs
1034 * that are present in the CRB initialize sequence 1051 * that are present in the CRB initialize sequence
1035 */ 1052 */
1036 if (qla4_8xxx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || 1053 if (qla4_82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
1037 qla4_8xxx_rom_fast_read(ha, 4, &n) != 0) { 1054 qla4_82xx_rom_fast_read(ha, 4, &n) != 0) {
1038 ql4_printk(KERN_WARNING, ha, 1055 ql4_printk(KERN_WARNING, ha,
1039 "[ERROR] Reading crb_init area: n: %08x\n", n); 1056 "[ERROR] Reading crb_init area: n: %08x\n", n);
1040 return -1; 1057 return -1;
@@ -1065,8 +1082,8 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1065 } 1082 }
1066 1083
1067 for (i = 0; i < n; i++) { 1084 for (i = 0; i < n; i++) {
1068 if (qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || 1085 if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
1069 qla4_8xxx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 1086 qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
1070 0) { 1087 0) {
1071 kfree(buf); 1088 kfree(buf);
1072 return -1; 1089 return -1;
@@ -1080,7 +1097,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1080 /* Translate internal CRB initialization 1097 /* Translate internal CRB initialization
1081 * address to PCI bus address 1098 * address to PCI bus address
1082 */ 1099 */
1083 off = qla4_8xxx_decode_crb_addr((unsigned long)buf[i].addr) + 1100 off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
1084 QLA82XX_PCI_CRBSPACE; 1101 QLA82XX_PCI_CRBSPACE;
1085 /* Not all CRB addr/value pairs are to be written, 1102 /* Not all CRB addr/value pairs are to be written,
1086 * some of them are skipped 1103 * some of them are skipped
@@ -1125,7 +1142,7 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1125 continue; 1142 continue;
1126 } 1143 }
1127 1144
1128 qla4_8xxx_wr_32(ha, off, buf[i].data); 1145 qla4_82xx_wr_32(ha, off, buf[i].data);
1129 1146
1130 /* ISP requires a much bigger delay to settle down, 1147 /* ISP requires a much bigger delay to settle down,
1131 * else crb_window returns 0xffffffff 1148 * else crb_window returns 0xffffffff
@@ -1142,25 +1159,25 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
1142 kfree(buf); 1159 kfree(buf);
1143 1160
1144 /* Resetting the data and instruction cache */ 1161 /* Resetting the data and instruction cache */
1145 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); 1162 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
1146 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); 1163 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
1147 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); 1164 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);
1148 1165
1149 /* Clear all protocol processing engines */ 1166 /* Clear all protocol processing engines */
1150 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); 1167 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
1151 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); 1168 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
1152 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); 1169 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
1153 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); 1170 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
1154 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); 1171 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
1155 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); 1172 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
1156 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); 1173 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
1157 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); 1174 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
1158 1175
1159 return 0; 1176 return 0;
1160} 1177}
1161 1178
1162static int 1179static int
1163qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) 1180qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1164{ 1181{
1165 int i, rval = 0; 1182 int i, rval = 0;
1166 long size = 0; 1183 long size = 0;
@@ -1175,14 +1192,14 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1175 ha->host_no, __func__, flashaddr, image_start)); 1192 ha->host_no, __func__, flashaddr, image_start));
1176 1193
1177 for (i = 0; i < size; i++) { 1194 for (i = 0; i < size; i++) {
1178 if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) || 1195 if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
1179 (qla4_8xxx_rom_fast_read(ha, flashaddr + 4, 1196 (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
1180 (int *)&high))) { 1197 (int *)&high))) {
1181 rval = -1; 1198 rval = -1;
1182 goto exit_load_from_flash; 1199 goto exit_load_from_flash;
1183 } 1200 }
1184 data = ((u64)high << 32) | low; 1201 data = ((u64)high << 32) | low;
1185 rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8); 1202 rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
1186 if (rval) 1203 if (rval)
1187 goto exit_load_from_flash; 1204 goto exit_load_from_flash;
1188 1205
@@ -1197,20 +1214,20 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
1197 udelay(100); 1214 udelay(100);
1198 1215
1199 read_lock(&ha->hw_lock); 1216 read_lock(&ha->hw_lock);
1200 qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); 1217 qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
1201 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); 1218 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
1202 read_unlock(&ha->hw_lock); 1219 read_unlock(&ha->hw_lock);
1203 1220
1204exit_load_from_flash: 1221exit_load_from_flash:
1205 return rval; 1222 return rval;
1206} 1223}
1207 1224
1208static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) 1225static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1209{ 1226{
1210 u32 rst; 1227 u32 rst;
1211 1228
1212 qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0); 1229 qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
1213 if (qla4_8xxx_pinit_from_rom(ha, 0) != QLA_SUCCESS) { 1230 if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
1214 printk(KERN_WARNING "%s: Error during CRB Initialization\n", 1231 printk(KERN_WARNING "%s: Error during CRB Initialization\n",
1215 __func__); 1232 __func__);
1216 return QLA_ERROR; 1233 return QLA_ERROR;
@@ -1223,12 +1240,12 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1223 * To get around this, QM is brought out of reset. 1240 * To get around this, QM is brought out of reset.
1224 */ 1241 */
1225 1242
1226 rst = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET); 1243 rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
1227 /* unreset qm */ 1244 /* unreset qm */
1228 rst &= ~(1 << 28); 1245 rst &= ~(1 << 28);
1229 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst); 1246 qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
1230 1247
1231 if (qla4_8xxx_load_from_flash(ha, image_start)) { 1248 if (qla4_82xx_load_from_flash(ha, image_start)) {
1232 printk("%s: Error trying to load fw from flash!\n", __func__); 1249 printk("%s: Error trying to load fw from flash!\n", __func__);
1233 return QLA_ERROR; 1250 return QLA_ERROR;
1234 } 1251 }
@@ -1237,7 +1254,7 @@ static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
1237} 1254}
1238 1255
1239int 1256int
1240qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha, 1257qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
1241 u64 off, void *data, int size) 1258 u64 off, void *data, int size)
1242{ 1259{
1243 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1260 int i, j = 0, k, start, end, loop, sz[2], off0[2];
@@ -1249,12 +1266,12 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1249 * If not MN, go check for MS or invalid. 1266 * If not MN, go check for MS or invalid.
1250 */ 1267 */
1251 1268
1252 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) 1269 if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1253 mem_crb = QLA82XX_CRB_QDR_NET; 1270 mem_crb = QLA82XX_CRB_QDR_NET;
1254 else { 1271 else {
1255 mem_crb = QLA82XX_CRB_DDR_NET; 1272 mem_crb = QLA82XX_CRB_DDR_NET;
1256 if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0) 1273 if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
1257 return qla4_8xxx_pci_mem_read_direct(ha, 1274 return qla4_82xx_pci_mem_read_direct(ha,
1258 off, data, size); 1275 off, data, size);
1259 } 1276 }
1260 1277
@@ -1270,16 +1287,16 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1270 1287
1271 for (i = 0; i < loop; i++) { 1288 for (i = 0; i < loop; i++) {
1272 temp = off8 + (i << shift_amount); 1289 temp = off8 + (i << shift_amount);
1273 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); 1290 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
1274 temp = 0; 1291 temp = 0;
1275 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); 1292 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
1276 temp = MIU_TA_CTL_ENABLE; 1293 temp = MIU_TA_CTL_ENABLE;
1277 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1294 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1278 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; 1295 temp = MIU_TA_CTL_START_ENABLE;
1279 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); 1296 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
1280 1297
1281 for (j = 0; j < MAX_CTL_CHECK; j++) { 1298 for (j = 0; j < MAX_CTL_CHECK; j++) {
1282 temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); 1299 temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1283 if ((temp & MIU_TA_CTL_BUSY) == 0) 1300 if ((temp & MIU_TA_CTL_BUSY) == 0)
1284 break; 1301 break;
1285 } 1302 }
@@ -1294,7 +1311,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1294 start = off0[i] >> 2; 1311 start = off0[i] >> 2;
1295 end = (off0[i] + sz[i] - 1) >> 2; 1312 end = (off0[i] + sz[i] - 1) >> 2;
1296 for (k = start; k <= end; k++) { 1313 for (k = start; k <= end; k++) {
1297 temp = qla4_8xxx_rd_32(ha, 1314 temp = qla4_82xx_rd_32(ha,
1298 mem_crb + MIU_TEST_AGT_RDDATA(k)); 1315 mem_crb + MIU_TEST_AGT_RDDATA(k));
1299 word[i] |= ((uint64_t)temp << (32 * (k & 1))); 1316 word[i] |= ((uint64_t)temp << (32 * (k & 1)));
1300 } 1317 }
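The inner loop reassembles each 64-bit word from two 32-bit MIU_TEST_AGT_RDDATA reads; the (k & 1) term decides whether a dword lands in the low or high half. The same packing, pulled out as a standalone sketch (a hypothetical helper, not part of the driver):

	/* Even k fills bits 31:0, odd k fills bits 63:32 -- this mirrors
	 * word[i] |= ((uint64_t)temp << (32 * (k & 1))) above. */
	static uint64_t pack_dwords(const uint32_t *dw, int start, int end)
	{
		uint64_t word = 0;
		int k;

		for (k = start; k <= end; k++)
			word |= (uint64_t)dw[k] << (32 * (k & 1));
		return word;
	}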
@@ -1328,7 +1345,7 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1328} 1345}
1329 1346
1330int 1347int
1331qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha, 1348qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
1332 u64 off, void *data, int size) 1349 u64 off, void *data, int size)
1333{ 1350{
1334 int i, j, ret = 0, loop, sz[2], off0; 1351 int i, j, ret = 0, loop, sz[2], off0;
@@ -1339,12 +1356,12 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1339 /* 1356 /*
1340 * If not MN, go check for MS or invalid. 1357 * If not MN, go check for MS or invalid.
1341 */ 1358 */
1342 if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) 1359 if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
1343 mem_crb = QLA82XX_CRB_QDR_NET; 1360 mem_crb = QLA82XX_CRB_QDR_NET;
1344 else { 1361 else {
1345 mem_crb = QLA82XX_CRB_DDR_NET; 1362 mem_crb = QLA82XX_CRB_DDR_NET;
1346 if (qla4_8xxx_pci_mem_bound_check(ha, off, size) == 0) 1363 if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
1347 return qla4_8xxx_pci_mem_write_direct(ha, 1364 return qla4_82xx_pci_mem_write_direct(ha,
1348 off, data, size); 1365 off, data, size);
1349 } 1366 }
1350 1367
@@ -1359,7 +1376,7 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1359 startword = (off & 0xf)/8; 1376 startword = (off & 0xf)/8;
1360 1377
1361 for (i = 0; i < loop; i++) { 1378 for (i = 0; i < loop; i++) {
1362 if (qla4_8xxx_pci_mem_read_2M(ha, off8 + 1379 if (qla4_82xx_pci_mem_read_2M(ha, off8 +
1363 (i << shift_amount), &word[i * scale], 8)) 1380 (i << shift_amount), &word[i * scale], 8))
1364 return -1; 1381 return -1;
1365 } 1382 }
@@ -1395,27 +1412,27 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1395 1412
1396 for (i = 0; i < loop; i++) { 1413 for (i = 0; i < loop; i++) {
1397 temp = off8 + (i << shift_amount); 1414 temp = off8 + (i << shift_amount);
1398 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); 1415 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
1399 temp = 0; 1416 temp = 0;
1400 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); 1417 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
1401 temp = word[i * scale] & 0xffffffff; 1418 temp = word[i * scale] & 0xffffffff;
1402 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); 1419 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
1403 temp = (word[i * scale] >> 32) & 0xffffffff; 1420 temp = (word[i * scale] >> 32) & 0xffffffff;
1404 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); 1421 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
1405 temp = word[i*scale + 1] & 0xffffffff; 1422 temp = word[i*scale + 1] & 0xffffffff;
1406 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, 1423 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
1407 temp); 1424 temp);
1408 temp = (word[i*scale + 1] >> 32) & 0xffffffff; 1425 temp = (word[i*scale + 1] >> 32) & 0xffffffff;
1409 qla4_8xxx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, 1426 qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
1410 temp); 1427 temp);
1411 1428
1412 temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1429 temp = MIU_TA_CTL_WRITE_ENABLE;
1413 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); 1430 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
1414 temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; 1431 temp = MIU_TA_CTL_WRITE_START;
1415 qla4_8xxx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp); 1432 qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
1416 1433
1417 for (j = 0; j < MAX_CTL_CHECK; j++) { 1434 for (j = 0; j < MAX_CTL_CHECK; j++) {
1418 temp = qla4_8xxx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); 1435 temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
1419 if ((temp & MIU_TA_CTL_BUSY) == 0) 1436 if ((temp & MIU_TA_CTL_BUSY) == 0)
1420 break; 1437 break;
1421 } 1438 }
@@ -1433,14 +1450,14 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1433 return ret; 1450 return ret;
1434} 1451}
1435 1452
1436static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) 1453static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1437{ 1454{
1438 u32 val = 0; 1455 u32 val = 0;
1439 int retries = 60; 1456 int retries = 60;
1440 1457
1441 if (!pegtune_val) { 1458 if (!pegtune_val) {
1442 do { 1459 do {
1443 val = qla4_8xxx_rd_32(ha, CRB_CMDPEG_STATE); 1460 val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
1444 if ((val == PHAN_INITIALIZE_COMPLETE) || 1461 if ((val == PHAN_INITIALIZE_COMPLETE) ||
1445 (val == PHAN_INITIALIZE_ACK)) 1462 (val == PHAN_INITIALIZE_ACK))
1446 return 0; 1463 return 0;
@@ -1450,7 +1467,7 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1450 } while (--retries); 1467 } while (--retries);
1451 1468
1452 if (!retries) { 1469 if (!retries) {
1453 pegtune_val = qla4_8xxx_rd_32(ha, 1470 pegtune_val = qla4_82xx_rd_32(ha,
1454 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); 1471 QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1455 printk(KERN_WARNING "%s: init failed, " 1472 printk(KERN_WARNING "%s: init failed, "
1456 "pegtune_val = %x\n", __func__, pegtune_val); 1473 "pegtune_val = %x\n", __func__, pegtune_val);
@@ -1460,21 +1477,21 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
1460 return 0; 1477 return 0;
1461} 1478}
1462 1479
1463static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha) 1480static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
1464{ 1481{
1465 uint32_t state = 0; 1482 uint32_t state = 0;
1466 int loops = 0; 1483 int loops = 0;
1467 1484
1468 /* Window 1 call */ 1485 /* Window 1 call */
1469 read_lock(&ha->hw_lock); 1486 read_lock(&ha->hw_lock);
1470 state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE); 1487 state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
1471 read_unlock(&ha->hw_lock); 1488 read_unlock(&ha->hw_lock);
1472 1489
1473 while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) { 1490 while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
1474 udelay(100); 1491 udelay(100);
1475 /* Window 1 call */ 1492 /* Window 1 call */
1476 read_lock(&ha->hw_lock); 1493 read_lock(&ha->hw_lock);
1477 state = qla4_8xxx_rd_32(ha, CRB_RCVPEG_STATE); 1494 state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
1478 read_unlock(&ha->hw_lock); 1495 read_unlock(&ha->hw_lock);
1479 1496
1480 loops++; 1497 loops++;
@@ -1494,11 +1511,21 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1494{ 1511{
1495 uint32_t drv_active; 1512 uint32_t drv_active;
1496 1513
1497 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1514 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1498 drv_active |= (1 << (ha->func_num * 4)); 1515
1516 /*
1517 * For ISP8324, drv_active register has 1 bit per function,
1518 * shift 1 by func_num to set the bit for this function.
1519 * For ISP8022, drv_active has 4 bits per function.
1520 */
1521 if (is_qla8032(ha))
1522 drv_active |= (1 << ha->func_num);
1523 else
1524 drv_active |= (1 << (ha->func_num * 4));
1525
1499 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", 1526 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1500 __func__, ha->host_no, drv_active); 1527 __func__, ha->host_no, drv_active);
1501 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1528 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
1502} 1529}
1503 1530
1504void 1531void
@@ -1506,50 +1533,87 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1506{ 1533{
1507 uint32_t drv_active; 1534 uint32_t drv_active;
1508 1535
1509 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1536 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1510 drv_active &= ~(1 << (ha->func_num * 4)); 1537
1538 /*
1539 * For ISP8324, drv_active register has 1 bit per function,
1540 * shift 1 by func_num to clear the bit for this function.
1541 * For ISP8022, drv_active has 4 bits per function.
1542 */
1543 if (is_qla8032(ha))
1544 drv_active &= ~(1 << (ha->func_num));
1545 else
1546 drv_active &= ~(1 << (ha->func_num * 4));
1547
1511 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n", 1548 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1512 __func__, ha->host_no, drv_active); 1549 __func__, ha->host_no, drv_active);
1513 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1550 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
1514} 1551}
1515 1552
1516static inline int 1553inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
1517qla4_8xxx_need_reset(struct scsi_qla_host *ha)
1518{ 1554{
1519 uint32_t drv_state, drv_active; 1555 uint32_t drv_state, drv_active;
1520 int rval; 1556 int rval;
1521 1557
1522 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1558 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1523 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1559 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1524 rval = drv_state & (1 << (ha->func_num * 4)); 1560
1561 /*
1562 * For ISP8324, the drv_state register has 1 bit per function;
1563 * shift 1 by func_num to check the bit for this function.
1564 * For ISP8022, drv_state has 4 bits per function.
1565 */
1566 if (is_qla8032(ha))
1567 rval = drv_state & (1 << ha->func_num);
1568 else
1569 rval = drv_state & (1 << (ha->func_num * 4));
1570
1525 if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active) 1571 if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
1526 rval = 1; 1572 rval = 1;
1527 1573
1528 return rval; 1574 return rval;
1529} 1575}
1530 1576
1531static inline void 1577void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1532qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1533{ 1578{
1534 uint32_t drv_state; 1579 uint32_t drv_state;
1535 1580
1536 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1581 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1537 drv_state |= (1 << (ha->func_num * 4)); 1582
1583 /*
1584 * For ISP8324, the drv_state register has 1 bit per function;
1585 * shift 1 by func_num to set the bit for this function.
1586 * For ISP8022, drv_state has 4 bits per function.
1587 */
1588 if (is_qla8032(ha))
1589 drv_state |= (1 << ha->func_num);
1590 else
1591 drv_state |= (1 << (ha->func_num * 4));
1592
1538 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", 1593 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1539 __func__, ha->host_no, drv_state); 1594 __func__, ha->host_no, drv_state);
1540 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1595 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
1541} 1596}
1542 1597
1543static inline void 1598void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1544qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1545{ 1599{
1546 uint32_t drv_state; 1600 uint32_t drv_state;
1547 1601
1548 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1602 drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1549 drv_state &= ~(1 << (ha->func_num * 4)); 1603
1604 /*
1605 * For ISP8324, the drv_state register has 1 bit per function;
1606 * shift 1 by func_num to clear the bit for this function.
1607 * For ISP8022, drv_state has 4 bits per function.
1608 */
1609 if (is_qla8032(ha))
1610 drv_state &= ~(1 << ha->func_num);
1611 else
1612 drv_state &= ~(1 << (ha->func_num * 4));
1613
1550 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n", 1614 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1551 __func__, ha->host_no, drv_state); 1615 __func__, ha->host_no, drv_state);
1552 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1616 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
1553} 1617}
1554 1618
1555static inline void 1619static inline void
@@ -1557,33 +1621,43 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
1557{ 1621{
1558 uint32_t qsnt_state; 1622 uint32_t qsnt_state;
1559 1623
1560 qsnt_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1624 qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
1561 qsnt_state |= (2 << (ha->func_num * 4)); 1625
1562 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); 1626 /*
1627 * For ISP8324, the drv_state register has 1 bit per function;
1628 * shift 1 by func_num to set the quiescent bit for this function.
1629 * For ISP8022, drv_state has 4 bits per function.
1630 */
1631 if (is_qla8032(ha))
1632 qsnt_state |= (1 << ha->func_num);
1633 else
1634 qsnt_state |= (2 << (ha->func_num * 4));
1635
1636 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
1563} 1637}
1564 1638
1565 1639
1566static int 1640static int
1567qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start) 1641qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1568{ 1642{
1569 uint16_t lnk; 1643 uint16_t lnk;
1570 1644
1571 /* scrub dma mask expansion register */ 1645 /* scrub dma mask expansion register */
1572 qla4_8xxx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555); 1646 qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);
1573 1647
1574 /* Overwrite stale initialization register values */ 1648 /* Overwrite stale initialization register values */
1575 qla4_8xxx_wr_32(ha, CRB_CMDPEG_STATE, 0); 1649 qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
1576 qla4_8xxx_wr_32(ha, CRB_RCVPEG_STATE, 0); 1650 qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
1577 qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0); 1651 qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
1578 qla4_8xxx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0); 1652 qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
1579 1653
1580 if (qla4_8xxx_load_fw(ha, image_start) != QLA_SUCCESS) { 1654 if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
1581 printk("%s: Error trying to start fw!\n", __func__); 1655 printk("%s: Error trying to start fw!\n", __func__);
1582 return QLA_ERROR; 1656 return QLA_ERROR;
1583 } 1657 }
1584 1658
1585 /* Handshake with the card before we register the devices. */ 1659 /* Handshake with the card before we register the devices. */
1586 if (qla4_8xxx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) { 1660 if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
1587 printk("%s: Error during card handshake!\n", __func__); 1661 printk("%s: Error during card handshake!\n", __func__);
1588 return QLA_ERROR; 1662 return QLA_ERROR;
1589 } 1663 }
@@ -1593,11 +1667,10 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
1593 ha->link_width = (lnk >> 4) & 0x3f; 1667 ha->link_width = (lnk >> 4) & 0x3f;
1594 1668
1595 /* Synchronize with Receive peg */ 1669 /* Synchronize with Receive peg */
1596 return qla4_8xxx_rcvpeg_ready(ha); 1670 return qla4_82xx_rcvpeg_ready(ha);
1597} 1671}
1598 1672
1599static int 1673int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
1600qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1601{ 1674{
1602 int rval = QLA_ERROR; 1675 int rval = QLA_ERROR;
1603 1676
@@ -1615,7 +1688,7 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1615 1688
1616 ql4_printk(KERN_INFO, ha, 1689 ql4_printk(KERN_INFO, ha,
1617 "FW: Attempting to load firmware from flash...\n"); 1690 "FW: Attempting to load firmware from flash...\n");
1618 rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw); 1691 rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);
1619 1692
1620 if (rval != QLA_SUCCESS) { 1693 if (rval != QLA_SUCCESS) {
1621 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash" 1694 ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
@@ -1626,9 +1699,9 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha)
1626 return rval; 1699 return rval;
1627} 1700}
1628 1701
1629static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha) 1702void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
1630{ 1703{
1631 if (qla4_8xxx_rom_lock(ha)) { 1704 if (qla4_82xx_rom_lock(ha)) {
1632 /* Someone else is holding the lock. */ 1705 /* Someone else is holding the lock. */
1633 dev_info(&ha->pdev->dev, "Resetting rom_lock\n"); 1706 dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
1634 } 1707 }
@@ -1638,25 +1711,25 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1638 * else died while holding it. 1711 * else died while holding it.
1639 * In either case, unlock. 1712 * In either case, unlock.
1640 */ 1713 */
1641 qla4_8xxx_rom_unlock(ha); 1714 qla4_82xx_rom_unlock(ha);
1642} 1715}
1643 1716
1644static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, 1717static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1645 struct qla82xx_minidump_entry_hdr *entry_hdr, 1718 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1646 uint32_t **d_ptr) 1719 uint32_t **d_ptr)
1647{ 1720{
1648 uint32_t r_addr, r_stride, loop_cnt, i, r_value; 1721 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1649 struct qla82xx_minidump_entry_crb *crb_hdr; 1722 struct qla8xxx_minidump_entry_crb *crb_hdr;
1650 uint32_t *data_ptr = *d_ptr; 1723 uint32_t *data_ptr = *d_ptr;
1651 1724
1652 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1725 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1653 crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr; 1726 crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
1654 r_addr = crb_hdr->addr; 1727 r_addr = crb_hdr->addr;
1655 r_stride = crb_hdr->crb_strd.addr_stride; 1728 r_stride = crb_hdr->crb_strd.addr_stride;
1656 loop_cnt = crb_hdr->op_count; 1729 loop_cnt = crb_hdr->op_count;
1657 1730
1658 for (i = 0; i < loop_cnt; i++) { 1731 for (i = 0; i < loop_cnt; i++) {
1659 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); 1732 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1660 *data_ptr++ = cpu_to_le32(r_addr); 1733 *data_ptr++ = cpu_to_le32(r_addr);
1661 *data_ptr++ = cpu_to_le32(r_value); 1734 *data_ptr++ = cpu_to_le32(r_value);
1662 r_addr += r_stride; 1735 r_addr += r_stride;
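This hunk shows the commit's central refactor for the minidump code: direct qla4_8xxx_md_rw_32() window accesses are replaced by rd_reg_indirect/wr_reg_indirect function pointers, so ISP82xx and ISP83xx can each plug their own register accessors in behind a common call site. A sketch of the shape of that indirection (only the two members used here are shown; the real struct in ql4_def.h carries many more ops):

	struct isp_operations {
		int (*rd_reg_indirect)(struct scsi_qla_host *ha, uint32_t addr,
				       uint32_t *data);
		int (*wr_reg_indirect)(struct scsi_qla_host *ha, uint32_t addr,
				       uint32_t data);
		/* ... */
	};

	/* Per-chip instances fill these in, e.g. (names hypothetical):
	 *   .rd_reg_indirect = qla4_82xx_md_rd_32,
	 *   .wr_reg_indirect = qla4_82xx_md_wr_32,
	 * and every dump routine calls ha->isp_ops->rd_reg_indirect(...). */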
@@ -1665,19 +1738,19 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1665} 1738}
1666 1739
1667static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, 1740static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1668 struct qla82xx_minidump_entry_hdr *entry_hdr, 1741 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1669 uint32_t **d_ptr) 1742 uint32_t **d_ptr)
1670{ 1743{
1671 uint32_t addr, r_addr, c_addr, t_r_addr; 1744 uint32_t addr, r_addr, c_addr, t_r_addr;
1672 uint32_t i, k, loop_count, t_value, r_cnt, r_value; 1745 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1673 unsigned long p_wait, w_time, p_mask; 1746 unsigned long p_wait, w_time, p_mask;
1674 uint32_t c_value_w, c_value_r; 1747 uint32_t c_value_w, c_value_r;
1675 struct qla82xx_minidump_entry_cache *cache_hdr; 1748 struct qla8xxx_minidump_entry_cache *cache_hdr;
1676 int rval = QLA_ERROR; 1749 int rval = QLA_ERROR;
1677 uint32_t *data_ptr = *d_ptr; 1750 uint32_t *data_ptr = *d_ptr;
1678 1751
1679 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1752 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1680 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr; 1753 cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
1681 1754
1682 loop_count = cache_hdr->op_count; 1755 loop_count = cache_hdr->op_count;
1683 r_addr = cache_hdr->read_addr; 1756 r_addr = cache_hdr->read_addr;
@@ -1691,16 +1764,16 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1691 p_mask = cache_hdr->cache_ctrl.poll_mask; 1764 p_mask = cache_hdr->cache_ctrl.poll_mask;
1692 1765
1693 for (i = 0; i < loop_count; i++) { 1766 for (i = 0; i < loop_count; i++) {
1694 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1); 1767 ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
1695 1768
1696 if (c_value_w) 1769 if (c_value_w)
1697 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1); 1770 ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
1698 1771
1699 if (p_mask) { 1772 if (p_mask) {
1700 w_time = jiffies + p_wait; 1773 w_time = jiffies + p_wait;
1701 do { 1774 do {
1702 c_value_r = qla4_8xxx_md_rw_32(ha, c_addr, 1775 ha->isp_ops->rd_reg_indirect(ha, c_addr,
1703 0, 0); 1776 &c_value_r);
1704 if ((c_value_r & p_mask) == 0) { 1777 if ((c_value_r & p_mask) == 0) {
1705 break; 1778 break;
1706 } else if (time_after_eq(jiffies, w_time)) { 1779 } else if (time_after_eq(jiffies, w_time)) {
@@ -1712,7 +1785,7 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1712 1785
1713 addr = r_addr; 1786 addr = r_addr;
1714 for (k = 0; k < r_cnt; k++) { 1787 for (k = 0; k < r_cnt; k++) {
1715 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); 1788 ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
1716 *data_ptr++ = cpu_to_le32(r_value); 1789 *data_ptr++ = cpu_to_le32(r_value);
1717 addr += cache_hdr->read_ctrl.read_addr_stride; 1790 addr += cache_hdr->read_ctrl.read_addr_stride;
1718 } 1791 }
@@ -1724,9 +1797,9 @@ static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1724} 1797}
1725 1798
1726static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha, 1799static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1727 struct qla82xx_minidump_entry_hdr *entry_hdr) 1800 struct qla8xxx_minidump_entry_hdr *entry_hdr)
1728{ 1801{
1729 struct qla82xx_minidump_entry_crb *crb_entry; 1802 struct qla8xxx_minidump_entry_crb *crb_entry;
1730 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS; 1803 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
1731 uint32_t crb_addr; 1804 uint32_t crb_addr;
1732 unsigned long wtime; 1805 unsigned long wtime;
@@ -1736,58 +1809,59 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1736 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1809 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1737 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) 1810 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1738 ha->fw_dump_tmplt_hdr; 1811 ha->fw_dump_tmplt_hdr;
1739 crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr; 1812 crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
1740 1813
1741 crb_addr = crb_entry->addr; 1814 crb_addr = crb_entry->addr;
1742 for (i = 0; i < crb_entry->op_count; i++) { 1815 for (i = 0; i < crb_entry->op_count; i++) {
1743 opcode = crb_entry->crb_ctrl.opcode; 1816 opcode = crb_entry->crb_ctrl.opcode;
1744 if (opcode & QLA82XX_DBG_OPCODE_WR) { 1817 if (opcode & QLA8XXX_DBG_OPCODE_WR) {
1745 qla4_8xxx_md_rw_32(ha, crb_addr, 1818 ha->isp_ops->wr_reg_indirect(ha, crb_addr,
1746 crb_entry->value_1, 1); 1819 crb_entry->value_1);
1747 opcode &= ~QLA82XX_DBG_OPCODE_WR; 1820 opcode &= ~QLA8XXX_DBG_OPCODE_WR;
1748 } 1821 }
1749 if (opcode & QLA82XX_DBG_OPCODE_RW) { 1822 if (opcode & QLA8XXX_DBG_OPCODE_RW) {
1750 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1823 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1751 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); 1824 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
1752 opcode &= ~QLA82XX_DBG_OPCODE_RW; 1825 opcode &= ~QLA8XXX_DBG_OPCODE_RW;
1753 } 1826 }
1754 if (opcode & QLA82XX_DBG_OPCODE_AND) { 1827 if (opcode & QLA8XXX_DBG_OPCODE_AND) {
1755 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1828 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1756 read_value &= crb_entry->value_2; 1829 read_value &= crb_entry->value_2;
1757 opcode &= ~QLA82XX_DBG_OPCODE_AND; 1830 opcode &= ~QLA8XXX_DBG_OPCODE_AND;
1758 if (opcode & QLA82XX_DBG_OPCODE_OR) { 1831 if (opcode & QLA8XXX_DBG_OPCODE_OR) {
1759 read_value |= crb_entry->value_3; 1832 read_value |= crb_entry->value_3;
1760 opcode &= ~QLA82XX_DBG_OPCODE_OR; 1833 opcode &= ~QLA8XXX_DBG_OPCODE_OR;
1761 } 1834 }
1762 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); 1835 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
1763 } 1836 }
1764 if (opcode & QLA82XX_DBG_OPCODE_OR) { 1837 if (opcode & QLA8XXX_DBG_OPCODE_OR) {
1765 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1838 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1766 read_value |= crb_entry->value_3; 1839 read_value |= crb_entry->value_3;
1767 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1); 1840 ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
1768 opcode &= ~QLA82XX_DBG_OPCODE_OR; 1841 opcode &= ~QLA8XXX_DBG_OPCODE_OR;
1769 } 1842 }
1770 if (opcode & QLA82XX_DBG_OPCODE_POLL) { 1843 if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
1771 poll_time = crb_entry->crb_strd.poll_timeout; 1844 poll_time = crb_entry->crb_strd.poll_timeout;
1772 wtime = jiffies + poll_time; 1845 wtime = jiffies + poll_time;
1773 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0); 1846 ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
1774 1847
1775 do { 1848 do {
1776 if ((read_value & crb_entry->value_2) == 1849 if ((read_value & crb_entry->value_2) ==
1777 crb_entry->value_1) 1850 crb_entry->value_1) {
1778 break; 1851 break;
1779 else if (time_after_eq(jiffies, wtime)) { 1852 } else if (time_after_eq(jiffies, wtime)) {
1780 /* capturing dump failed */ 1853 /* capturing dump failed */
1781 rval = QLA_ERROR; 1854 rval = QLA_ERROR;
1782 break; 1855 break;
1783 } else 1856 } else {
1784 read_value = qla4_8xxx_md_rw_32(ha, 1857 ha->isp_ops->rd_reg_indirect(ha,
1785 crb_addr, 0, 0); 1858 crb_addr, &read_value);
1859 }
1786 } while (1); 1860 } while (1);
1787 opcode &= ~QLA82XX_DBG_OPCODE_POLL; 1861 opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
1788 } 1862 }
1789 1863
1790 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { 1864 if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
1791 if (crb_entry->crb_strd.state_index_a) { 1865 if (crb_entry->crb_strd.state_index_a) {
1792 index = crb_entry->crb_strd.state_index_a; 1866 index = crb_entry->crb_strd.state_index_a;
1793 addr = tmplt_hdr->saved_state_array[index]; 1867 addr = tmplt_hdr->saved_state_array[index];
@@ -1795,13 +1869,13 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1795 addr = crb_addr; 1869 addr = crb_addr;
1796 } 1870 }
1797 1871
1798 read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); 1872 ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
1799 index = crb_entry->crb_ctrl.state_index_v; 1873 index = crb_entry->crb_ctrl.state_index_v;
1800 tmplt_hdr->saved_state_array[index] = read_value; 1874 tmplt_hdr->saved_state_array[index] = read_value;
1801 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; 1875 opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
1802 } 1876 }
1803 1877
1804 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { 1878 if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
1805 if (crb_entry->crb_strd.state_index_a) { 1879 if (crb_entry->crb_strd.state_index_a) {
1806 index = crb_entry->crb_strd.state_index_a; 1880 index = crb_entry->crb_strd.state_index_a;
1807 addr = tmplt_hdr->saved_state_array[index]; 1881 addr = tmplt_hdr->saved_state_array[index];
@@ -1817,11 +1891,11 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1817 read_value = crb_entry->value_1; 1891 read_value = crb_entry->value_1;
1818 } 1892 }
1819 1893
1820 qla4_8xxx_md_rw_32(ha, addr, read_value, 1); 1894 ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
1821 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; 1895 opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
1822 } 1896 }
1823 1897
1824 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { 1898 if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
1825 index = crb_entry->crb_ctrl.state_index_v; 1899 index = crb_entry->crb_ctrl.state_index_v;
1826 read_value = tmplt_hdr->saved_state_array[index]; 1900 read_value = tmplt_hdr->saved_state_array[index];
1827 read_value <<= crb_entry->crb_ctrl.shl; 1901 read_value <<= crb_entry->crb_ctrl.shl;
@@ -1831,7 +1905,7 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1831 read_value |= crb_entry->value_3; 1905 read_value |= crb_entry->value_3;
1832 read_value += crb_entry->value_1; 1906 read_value += crb_entry->value_1;
1833 tmplt_hdr->saved_state_array[index] = read_value; 1907 tmplt_hdr->saved_state_array[index] = read_value;
1834 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; 1908 opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
1835 } 1909 }
1836 crb_addr += crb_entry->crb_strd.addr_stride; 1910 crb_addr += crb_entry->crb_strd.addr_stride;
1837 } 1911 }
@@ -1840,15 +1914,15 @@ static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1840} 1914}
1841 1915
1842static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha, 1916static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1843 struct qla82xx_minidump_entry_hdr *entry_hdr, 1917 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1844 uint32_t **d_ptr) 1918 uint32_t **d_ptr)
1845{ 1919{
1846 uint32_t r_addr, r_stride, loop_cnt, i, r_value; 1920 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1847 struct qla82xx_minidump_entry_rdocm *ocm_hdr; 1921 struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
1848 uint32_t *data_ptr = *d_ptr; 1922 uint32_t *data_ptr = *d_ptr;
1849 1923
1850 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1924 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1851 ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr; 1925 ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
1852 r_addr = ocm_hdr->read_addr; 1926 r_addr = ocm_hdr->read_addr;
1853 r_stride = ocm_hdr->read_addr_stride; 1927 r_stride = ocm_hdr->read_addr_stride;
1854 loop_cnt = ocm_hdr->op_count; 1928 loop_cnt = ocm_hdr->op_count;
@@ -1863,20 +1937,20 @@ static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1863 r_addr += r_stride; 1937 r_addr += r_stride;
1864 } 1938 }
1865 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n", 1939 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
1866 __func__, (loop_cnt * sizeof(uint32_t)))); 1940 __func__, (unsigned long)(loop_cnt * sizeof(uint32_t))));
1867 *d_ptr = data_ptr; 1941 *d_ptr = data_ptr;
1868} 1942}
1869 1943
1870static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha, 1944static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1871 struct qla82xx_minidump_entry_hdr *entry_hdr, 1945 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1872 uint32_t **d_ptr) 1946 uint32_t **d_ptr)
1873{ 1947{
1874 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; 1948 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
1875 struct qla82xx_minidump_entry_mux *mux_hdr; 1949 struct qla8xxx_minidump_entry_mux *mux_hdr;
1876 uint32_t *data_ptr = *d_ptr; 1950 uint32_t *data_ptr = *d_ptr;
1877 1951
1878 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 1952 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1879 mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr; 1953 mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
1880 r_addr = mux_hdr->read_addr; 1954 r_addr = mux_hdr->read_addr;
1881 s_addr = mux_hdr->select_addr; 1955 s_addr = mux_hdr->select_addr;
1882 s_stride = mux_hdr->select_value_stride; 1956 s_stride = mux_hdr->select_value_stride;
@@ -1884,8 +1958,8 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1884 loop_cnt = mux_hdr->op_count; 1958 loop_cnt = mux_hdr->op_count;
1885 1959
1886 for (i = 0; i < loop_cnt; i++) { 1960 for (i = 0; i < loop_cnt; i++) {
1887 qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1); 1961 ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
1888 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); 1962 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1889 *data_ptr++ = cpu_to_le32(s_value); 1963 *data_ptr++ = cpu_to_le32(s_value);
1890 *data_ptr++ = cpu_to_le32(r_value); 1964 *data_ptr++ = cpu_to_le32(r_value);
1891 s_value += s_stride; 1965 s_value += s_stride;
@@ -1894,16 +1968,16 @@ static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1894} 1968}
1895 1969
1896static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha, 1970static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1897 struct qla82xx_minidump_entry_hdr *entry_hdr, 1971 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1898 uint32_t **d_ptr) 1972 uint32_t **d_ptr)
1899{ 1973{
1900 uint32_t addr, r_addr, c_addr, t_r_addr; 1974 uint32_t addr, r_addr, c_addr, t_r_addr;
1901 uint32_t i, k, loop_count, t_value, r_cnt, r_value; 1975 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1902 uint32_t c_value_w; 1976 uint32_t c_value_w;
1903 struct qla82xx_minidump_entry_cache *cache_hdr; 1977 struct qla8xxx_minidump_entry_cache *cache_hdr;
1904 uint32_t *data_ptr = *d_ptr; 1978 uint32_t *data_ptr = *d_ptr;
1905 1979
1906 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr; 1980 cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
1907 loop_count = cache_hdr->op_count; 1981 loop_count = cache_hdr->op_count;
1908 r_addr = cache_hdr->read_addr; 1982 r_addr = cache_hdr->read_addr;
1909 c_addr = cache_hdr->control_addr; 1983 c_addr = cache_hdr->control_addr;
@@ -1914,11 +1988,11 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1914 r_cnt = cache_hdr->read_ctrl.read_addr_cnt; 1988 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1915 1989
1916 for (i = 0; i < loop_count; i++) { 1990 for (i = 0; i < loop_count; i++) {
1917 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1); 1991 ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
1918 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1); 1992 ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
1919 addr = r_addr; 1993 addr = r_addr;
1920 for (k = 0; k < r_cnt; k++) { 1994 for (k = 0; k < r_cnt; k++) {
1921 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0); 1995 ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
1922 *data_ptr++ = cpu_to_le32(r_value); 1996 *data_ptr++ = cpu_to_le32(r_value);
1923 addr += cache_hdr->read_ctrl.read_addr_stride; 1997 addr += cache_hdr->read_ctrl.read_addr_stride;
1924 } 1998 }
@@ -1928,27 +2002,27 @@ static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1928} 2002}
1929 2003
1930static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha, 2004static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1931 struct qla82xx_minidump_entry_hdr *entry_hdr, 2005 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1932 uint32_t **d_ptr) 2006 uint32_t **d_ptr)
1933{ 2007{
1934 uint32_t s_addr, r_addr; 2008 uint32_t s_addr, r_addr;
1935 uint32_t r_stride, r_value, r_cnt, qid = 0; 2009 uint32_t r_stride, r_value, r_cnt, qid = 0;
1936 uint32_t i, k, loop_cnt; 2010 uint32_t i, k, loop_cnt;
1937 struct qla82xx_minidump_entry_queue *q_hdr; 2011 struct qla8xxx_minidump_entry_queue *q_hdr;
1938 uint32_t *data_ptr = *d_ptr; 2012 uint32_t *data_ptr = *d_ptr;
1939 2013
1940 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 2014 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1941 q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr; 2015 q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
1942 s_addr = q_hdr->select_addr; 2016 s_addr = q_hdr->select_addr;
1943 r_cnt = q_hdr->rd_strd.read_addr_cnt; 2017 r_cnt = q_hdr->rd_strd.read_addr_cnt;
1944 r_stride = q_hdr->rd_strd.read_addr_stride; 2018 r_stride = q_hdr->rd_strd.read_addr_stride;
1945 loop_cnt = q_hdr->op_count; 2019 loop_cnt = q_hdr->op_count;
1946 2020
1947 for (i = 0; i < loop_cnt; i++) { 2021 for (i = 0; i < loop_cnt; i++) {
1948 qla4_8xxx_md_rw_32(ha, s_addr, qid, 1); 2022 ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
1949 r_addr = q_hdr->read_addr; 2023 r_addr = q_hdr->read_addr;
1950 for (k = 0; k < r_cnt; k++) { 2024 for (k = 0; k < r_cnt; k++) {
1951 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0); 2025 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
1952 *data_ptr++ = cpu_to_le32(r_value); 2026 *data_ptr++ = cpu_to_le32(r_value);
1953 r_addr += r_stride; 2027 r_addr += r_stride;
1954 } 2028 }
@@ -1960,17 +2034,17 @@ static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1960#define MD_DIRECT_ROM_WINDOW 0x42110030 2034#define MD_DIRECT_ROM_WINDOW 0x42110030
1961#define MD_DIRECT_ROM_READ_BASE 0x42150000 2035#define MD_DIRECT_ROM_READ_BASE 0x42150000
1962 2036
1963static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha, 2037static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
1964 struct qla82xx_minidump_entry_hdr *entry_hdr, 2038 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1965 uint32_t **d_ptr) 2039 uint32_t **d_ptr)
1966{ 2040{
1967 uint32_t r_addr, r_value; 2041 uint32_t r_addr, r_value;
1968 uint32_t i, loop_cnt; 2042 uint32_t i, loop_cnt;
1969 struct qla82xx_minidump_entry_rdrom *rom_hdr; 2043 struct qla8xxx_minidump_entry_rdrom *rom_hdr;
1970 uint32_t *data_ptr = *d_ptr; 2044 uint32_t *data_ptr = *d_ptr;
1971 2045
1972 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 2046 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1973 rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr; 2047 rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
1974 r_addr = rom_hdr->read_addr; 2048 r_addr = rom_hdr->read_addr;
1975 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); 2049 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
1976 2050
@@ -1979,11 +2053,11 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1979 __func__, r_addr, loop_cnt)); 2053 __func__, r_addr, loop_cnt));
1980 2054
1981 for (i = 0; i < loop_cnt; i++) { 2055 for (i = 0; i < loop_cnt; i++) {
1982 qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, 2056 ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
1983 (r_addr & 0xFFFF0000), 1); 2057 (r_addr & 0xFFFF0000));
1984 r_value = qla4_8xxx_md_rw_32(ha, 2058 ha->isp_ops->rd_reg_indirect(ha,
1985 MD_DIRECT_ROM_READ_BASE + 2059 MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
1986 (r_addr & 0x0000FFFF), 0, 0); 2060 &r_value);
1987 *data_ptr++ = cpu_to_le32(r_value); 2061 *data_ptr++ = cpu_to_le32(r_value);
1988 r_addr += sizeof(uint32_t); 2062 r_addr += sizeof(uint32_t);
1989 } 2063 }
@@ -1995,17 +2069,17 @@ static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1995#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 2069#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1996 2070
1997static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, 2071static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
1998 struct qla82xx_minidump_entry_hdr *entry_hdr, 2072 struct qla8xxx_minidump_entry_hdr *entry_hdr,
1999 uint32_t **d_ptr) 2073 uint32_t **d_ptr)
2000{ 2074{
2001 uint32_t r_addr, r_value, r_data; 2075 uint32_t r_addr, r_value, r_data;
2002 uint32_t i, j, loop_cnt; 2076 uint32_t i, j, loop_cnt;
2003 struct qla82xx_minidump_entry_rdmem *m_hdr; 2077 struct qla8xxx_minidump_entry_rdmem *m_hdr;
2004 unsigned long flags; 2078 unsigned long flags;
2005 uint32_t *data_ptr = *d_ptr; 2079 uint32_t *data_ptr = *d_ptr;
2006 2080
2007 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); 2081 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2008 m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr; 2082 m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
2009 r_addr = m_hdr->read_addr; 2083 r_addr = m_hdr->read_addr;
2010 loop_cnt = m_hdr->read_data_size/16; 2084 loop_cnt = m_hdr->read_data_size/16;
2011 2085
@@ -2033,17 +2107,19 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2033 2107
2034 write_lock_irqsave(&ha->hw_lock, flags); 2108 write_lock_irqsave(&ha->hw_lock, flags);
2035 for (i = 0; i < loop_cnt; i++) { 2109 for (i = 0; i < loop_cnt; i++) {
2036 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); 2110 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
2111 r_addr);
2037 r_value = 0; 2112 r_value = 0;
2038 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); 2113 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
2114 r_value);
2039 r_value = MIU_TA_CTL_ENABLE; 2115 r_value = MIU_TA_CTL_ENABLE;
2040 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); 2116 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
2041 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; 2117 r_value = MIU_TA_CTL_START_ENABLE;
2042 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); 2118 ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
2043 2119
2044 for (j = 0; j < MAX_CTL_CHECK; j++) { 2120 for (j = 0; j < MAX_CTL_CHECK; j++) {
2045 r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, 2121 ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
2046 0, 0); 2122 &r_value);
2047 if ((r_value & MIU_TA_CTL_BUSY) == 0) 2123 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2048 break; 2124 break;
2049 } 2125 }
@@ -2057,9 +2133,9 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2057 } 2133 }
2058 2134
2059 for (j = 0; j < 4; j++) { 2135 for (j = 0; j < 4; j++) {
2060 r_data = qla4_8xxx_md_rw_32(ha, 2136 ha->isp_ops->rd_reg_indirect(ha,
2061 MD_MIU_TEST_AGT_RDDATA[j], 2137 MD_MIU_TEST_AGT_RDDATA[j],
2062 0, 0); 2138 &r_data);
2063 *data_ptr++ = cpu_to_le32(r_data); 2139 *data_ptr++ = cpu_to_le32(r_data);
2064 } 2140 }
2065 2141
@@ -2074,25 +2150,215 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2074 return QLA_SUCCESS; 2150 return QLA_SUCCESS;
2075} 2151}
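
The rdmem path above talks to the MIU test agent: latch the 64-bit address into ADDR_LO/ADDR_HI, write ENABLE and then START|ENABLE to the control register, poll until BUSY drops, then collect 16 bytes from the four RDDATA registers. A condensed sketch of one 16-byte beat, reusing the hypothetical rd32()/wr32() helpers (control-bit values follow the driver's ql4_nx.h; MAX_CTL_CHECK is assumed here, and MD_MIU_TEST_AGT_RDDATA stands for the driver's table of read-data register offsets):

#include <stdint.h>

#define MIU_TA_CTL_START        1
#define MIU_TA_CTL_ENABLE       2
#define MIU_TA_CTL_BUSY         8
#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
#define MAX_CTL_CHECK           1000	/* assumed poll bound */

#define MD_MIU_TEST_AGT_CTRL     0x41000090
#define MD_MIU_TEST_AGT_ADDR_LO  0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI  0x41000098

extern const uint32_t MD_MIU_TEST_AGT_RDDATA[4];	/* per the driver */
uint32_t rd32(uint32_t off);
void wr32(uint32_t off, uint32_t val);

/* Read 16 bytes of adapter memory at 'addr' into buf[4]; returns 0 on
 * success, -1 if the test agent never clears BUSY. */
static int miu_read_16(uint32_t addr, uint32_t buf[4])
{
	int j;

	wr32(MD_MIU_TEST_AGT_ADDR_LO, addr);
	wr32(MD_MIU_TEST_AGT_ADDR_HI, 0);
	wr32(MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_ENABLE);
	wr32(MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_START_ENABLE);

	for (j = 0; j < MAX_CTL_CHECK; j++)
		if (!(rd32(MD_MIU_TEST_AGT_CTRL) & MIU_TA_CTL_BUSY))
			break;
	if (j == MAX_CTL_CHECK)
		return -1;

	for (j = 0; j < 4; j++)
		buf[j] = rd32(MD_MIU_TEST_AGT_RDDATA[j]);
	return 0;
}
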
2076 2152
2077static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, 2153static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2078 struct qla82xx_minidump_entry_hdr *entry_hdr, 2154 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2079 int index) 2155 int index)
2080{ 2156{
2081 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; 2157 entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
2082 DEBUG2(ql4_printk(KERN_INFO, ha, 2158 DEBUG2(ql4_printk(KERN_INFO, ha,
2083 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", 2159 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2084 ha->host_no, index, entry_hdr->entry_type, 2160 ha->host_no, index, entry_hdr->entry_type,
2085 entry_hdr->d_ctrl.entry_capture_mask)); 2161 entry_hdr->d_ctrl.entry_capture_mask));
2086} 2162}
2087 2163
 2164/* ISP83xx functions to process new minidump entries. */
2165static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
2166 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2167 uint32_t **d_ptr)
2168{
2169 uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2170 uint16_t s_stride, i;
2171 uint32_t *data_ptr = *d_ptr;
2172 uint32_t rval = QLA_SUCCESS;
2173 struct qla83xx_minidump_entry_pollrd *pollrd_hdr;
2174
2175 pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
2176 s_addr = le32_to_cpu(pollrd_hdr->select_addr);
2177 r_addr = le32_to_cpu(pollrd_hdr->read_addr);
2178 s_value = le32_to_cpu(pollrd_hdr->select_value);
2179 s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);
2180
2181 poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
2182 poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);
2183
2184 for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
2185 ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
2186 poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
2187 while (1) {
2188 ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);
2189
2190 if ((r_value & poll_mask) != 0) {
2191 break;
2192 } else {
2193 msleep(1);
2194 if (--poll_wait == 0) {
2195 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
2196 __func__);
2197 rval = QLA_ERROR;
2198 goto exit_process_pollrd;
2199 }
2200 }
2201 }
2202 ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
2203 *data_ptr++ = cpu_to_le32(s_value);
2204 *data_ptr++ = cpu_to_le32(r_value);
2205 s_value += s_stride;
2206 }
2207
2208 *d_ptr = data_ptr;
2209
2210exit_process_pollrd:
2211 return rval;
2212}
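
qla83xx_minidump_process_pollrd() above embodies a recurring ISP83xx idiom: arm a select register, then poll a status register until the masked value goes non-zero, sleeping 1 ms per attempt and failing after poll_wait tries. The poll factored into a standalone helper (a sketch; msleep() is the kernel's millisecond sleep, declared here as a stub):

#include <stdint.h>

uint32_t rd32(uint32_t off);		/* hypothetical accessor */
void msleep(unsigned int msecs);	/* kernel sleep stub */

/* Poll until (reg & mask) != 0; returns 0 on success, -1 after
 * 'tries' one-millisecond waits. */
static int poll_reg_set(uint32_t addr, uint32_t mask, uint32_t tries)
{
	while (tries--) {
		if (rd32(addr) & mask)
			return 0;
		msleep(1);
	}
	return -1;
}
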
2213
2214static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
2215 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2216 uint32_t **d_ptr)
2217{
2218 uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2219 uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2220 struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
2221 uint32_t *data_ptr = *d_ptr;
2222
2223 rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
2224 sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
2225 sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
2226 sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
2227 sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
2228 sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
2229 read_addr = le32_to_cpu(rdmux2_hdr->read_addr);
2230
2231 for (i = 0; i < rdmux2_hdr->op_count; i++) {
2232 ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
2233 t_sel_val = sel_val1 & sel_val_mask;
2234 *data_ptr++ = cpu_to_le32(t_sel_val);
2235
2236 ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
2237 ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
2238
2239 *data_ptr++ = cpu_to_le32(data);
2240
2241 ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
2242 t_sel_val = sel_val2 & sel_val_mask;
2243 *data_ptr++ = cpu_to_le32(t_sel_val);
2244
2245 ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
2246 ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);
2247
2248 *data_ptr++ = cpu_to_le32(data);
2249
2250 sel_val1 += rdmux2_hdr->select_value_stride;
2251 sel_val2 += rdmux2_hdr->select_value_stride;
2252 }
2253
2254 *d_ptr = data_ptr;
2255}
2256
2257static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
2258 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2259 uint32_t **d_ptr)
2260{
2261 uint32_t poll_wait, poll_mask, r_value, data;
2262 uint32_t addr_1, addr_2, value_1, value_2;
2263 uint32_t *data_ptr = *d_ptr;
2264 uint32_t rval = QLA_SUCCESS;
2265 struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;
2266
2267 poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
2268 addr_1 = le32_to_cpu(poll_hdr->addr_1);
2269 addr_2 = le32_to_cpu(poll_hdr->addr_2);
2270 value_1 = le32_to_cpu(poll_hdr->value_1);
2271 value_2 = le32_to_cpu(poll_hdr->value_2);
2272 poll_mask = le32_to_cpu(poll_hdr->poll_mask);
2273
2274 ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);
2275
2276 poll_wait = le32_to_cpu(poll_hdr->poll_wait);
2277 while (1) {
2278 ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
2279
2280 if ((r_value & poll_mask) != 0) {
2281 break;
2282 } else {
2283 msleep(1);
2284 if (--poll_wait == 0) {
2285 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
2286 __func__);
2287 rval = QLA_ERROR;
2288 goto exit_process_pollrdmwr;
2289 }
2290 }
2291 }
2292
2293 ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
2294 data &= le32_to_cpu(poll_hdr->modify_mask);
2295 ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
2296 ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);
2297
2298 poll_wait = le32_to_cpu(poll_hdr->poll_wait);
2299 while (1) {
2300 ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);
2301
2302 if ((r_value & poll_mask) != 0) {
2303 break;
2304 } else {
2305 msleep(1);
2306 if (--poll_wait == 0) {
2307 ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
2308 __func__);
2309 rval = QLA_ERROR;
2310 goto exit_process_pollrdmwr;
2311 }
2312 }
2313 }
2314
2315 *data_ptr++ = cpu_to_le32(addr_2);
2316 *data_ptr++ = cpu_to_le32(data);
2317 *d_ptr = data_ptr;
2318
2319exit_process_pollrdmwr:
2320 return rval;
2321}
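
pollrdmwr chains two of those polls around a read-modify-write: after the first poll succeeds, the value at addr_2 is masked with modify_mask and written straight back, addr_1 is re-armed with value_2, and a second poll gates the final (addr_2, data) pair. A hypothetical condensation of that flow, built on the rd32()/wr32()/poll_reg_set() sketches above:

#include <stdint.h>

uint32_t rd32(uint32_t off);
void wr32(uint32_t off, uint32_t val);
int poll_reg_set(uint32_t addr, uint32_t mask, uint32_t tries);

/* Returns 0 on success, -1 if either poll times out. */
static int poll_rd_modify_wr(uint32_t addr_1, uint32_t addr_2,
			     uint32_t value_1, uint32_t value_2,
			     uint32_t poll_mask, uint32_t poll_wait,
			     uint32_t modify_mask, uint32_t *out)
{
	uint32_t data;

	wr32(addr_1, value_1);
	if (poll_reg_set(addr_1, poll_mask, poll_wait))
		return -1;

	data = rd32(addr_2) & modify_mask;	/* read, mask ...   */
	wr32(addr_2, data);			/* ... write back   */
	wr32(addr_1, value_2);
	if (poll_reg_set(addr_1, poll_mask, poll_wait))
		return -1;

	*out = data;		/* dumped alongside addr_2 by the caller */
	return 0;
}
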
2322
2323static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
2324 struct qla8xxx_minidump_entry_hdr *entry_hdr,
2325 uint32_t **d_ptr)
2326{
2327 uint32_t fl_addr, u32_count, rval;
2328 struct qla8xxx_minidump_entry_rdrom *rom_hdr;
2329 uint32_t *data_ptr = *d_ptr;
2330
2331 rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
2332 fl_addr = le32_to_cpu(rom_hdr->read_addr);
2333 u32_count = le32_to_cpu(rom_hdr->read_data_size)/sizeof(uint32_t);
2334
2335 DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2336 __func__, fl_addr, u32_count));
2337
2338 rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
2339 (u8 *)(data_ptr), u32_count);
2340
2341 if (rval == QLA_ERROR) {
 2342 ql4_printk(KERN_ERR, ha, "%s: Flash Read Error, Count=%d\n",
2343 __func__, u32_count);
2344 goto exit_process_rdrom;
2345 }
2346
2347 data_ptr += u32_count;
2348 *d_ptr = data_ptr;
2349
2350exit_process_rdrom:
2351 return rval;
2352}
2353
2088/** 2354/**
2089 * qla82xx_collect_md_data - Retrieve firmware minidump data. 2355 * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
2090 * @ha: pointer to adapter structure 2356 * @ha: pointer to adapter structure
2091 **/ 2357 **/
2092static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) 2358static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2093{ 2359{
2094 int num_entry_hdr = 0; 2360 int num_entry_hdr = 0;
2095 struct qla82xx_minidump_entry_hdr *entry_hdr; 2361 struct qla8xxx_minidump_entry_hdr *entry_hdr;
2096 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr; 2362 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2097 uint32_t *data_ptr; 2363 uint32_t *data_ptr;
2098 uint32_t data_collected = 0; 2364 uint32_t data_collected = 0;
@@ -2128,10 +2394,14 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2128 timestamp = (u32)(jiffies_to_msecs(now) / 1000); 2394 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
2129 tmplt_hdr->driver_timestamp = timestamp; 2395 tmplt_hdr->driver_timestamp = timestamp;
2130 2396
2131 entry_hdr = (struct qla82xx_minidump_entry_hdr *) 2397 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
2132 (((uint8_t *)ha->fw_dump_tmplt_hdr) + 2398 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
2133 tmplt_hdr->first_entry_offset); 2399 tmplt_hdr->first_entry_offset);
2134 2400
2401 if (is_qla8032(ha))
2402 tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
2403 tmplt_hdr->ocm_window_reg[ha->func_num];
2404
2135 /* Walk through the entry headers - validate/perform required action */ 2405 /* Walk through the entry headers - validate/perform required action */
2136 for (i = 0; i < num_entry_hdr; i++) { 2406 for (i = 0; i < num_entry_hdr; i++) {
2137 if (data_collected >= ha->fw_dump_size) { 2407 if (data_collected >= ha->fw_dump_size) {
@@ -2144,7 +2414,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2144 if (!(entry_hdr->d_ctrl.entry_capture_mask & 2414 if (!(entry_hdr->d_ctrl.entry_capture_mask &
2145 ha->fw_dump_capture_mask)) { 2415 ha->fw_dump_capture_mask)) {
2146 entry_hdr->d_ctrl.driver_flags |= 2416 entry_hdr->d_ctrl.driver_flags |=
2147 QLA82XX_DBG_SKIPPED_FLAG; 2417 QLA8XXX_DBG_SKIPPED_FLAG;
2148 goto skip_nxt_entry; 2418 goto skip_nxt_entry;
2149 } 2419 }
2150 2420
@@ -2157,65 +2427,105 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2157 * debug data 2427 * debug data
2158 */ 2428 */
2159 switch (entry_hdr->entry_type) { 2429 switch (entry_hdr->entry_type) {
2160 case QLA82XX_RDEND: 2430 case QLA8XXX_RDEND:
2161 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2431 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2162 break; 2432 break;
2163 case QLA82XX_CNTRL: 2433 case QLA8XXX_CNTRL:
2164 rval = qla4_8xxx_minidump_process_control(ha, 2434 rval = qla4_8xxx_minidump_process_control(ha,
2165 entry_hdr); 2435 entry_hdr);
2166 if (rval != QLA_SUCCESS) { 2436 if (rval != QLA_SUCCESS) {
2167 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2437 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2168 goto md_failed; 2438 goto md_failed;
2169 } 2439 }
2170 break; 2440 break;
2171 case QLA82XX_RDCRB: 2441 case QLA8XXX_RDCRB:
2172 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr, 2442 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
2173 &data_ptr); 2443 &data_ptr);
2174 break; 2444 break;
2175 case QLA82XX_RDMEM: 2445 case QLA8XXX_RDMEM:
2176 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, 2446 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2177 &data_ptr); 2447 &data_ptr);
2178 if (rval != QLA_SUCCESS) { 2448 if (rval != QLA_SUCCESS) {
2179 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2449 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2180 goto md_failed; 2450 goto md_failed;
2181 } 2451 }
2182 break; 2452 break;
2183 case QLA82XX_BOARD: 2453 case QLA8XXX_BOARD:
2184 case QLA82XX_RDROM: 2454 case QLA8XXX_RDROM:
2185 qla4_8xxx_minidump_process_rdrom(ha, entry_hdr, 2455 if (is_qla8022(ha)) {
2186 &data_ptr); 2456 qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
2457 &data_ptr);
2458 } else if (is_qla8032(ha)) {
2459 rval = qla4_83xx_minidump_process_rdrom(ha,
2460 entry_hdr,
2461 &data_ptr);
2462 if (rval != QLA_SUCCESS)
2463 qla4_8xxx_mark_entry_skipped(ha,
2464 entry_hdr,
2465 i);
2466 }
2187 break; 2467 break;
2188 case QLA82XX_L2DTG: 2468 case QLA8XXX_L2DTG:
2189 case QLA82XX_L2ITG: 2469 case QLA8XXX_L2ITG:
2190 case QLA82XX_L2DAT: 2470 case QLA8XXX_L2DAT:
2191 case QLA82XX_L2INS: 2471 case QLA8XXX_L2INS:
2192 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr, 2472 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
2193 &data_ptr); 2473 &data_ptr);
2194 if (rval != QLA_SUCCESS) { 2474 if (rval != QLA_SUCCESS) {
2195 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2475 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2196 goto md_failed; 2476 goto md_failed;
2197 } 2477 }
2198 break; 2478 break;
2199 case QLA82XX_L1DAT: 2479 case QLA8XXX_L1DTG:
2200 case QLA82XX_L1INS: 2480 case QLA8XXX_L1ITG:
2481 case QLA8XXX_L1DAT:
2482 case QLA8XXX_L1INS:
2201 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr, 2483 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
2202 &data_ptr); 2484 &data_ptr);
2203 break; 2485 break;
2204 case QLA82XX_RDOCM: 2486 case QLA8XXX_RDOCM:
2205 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr, 2487 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
2206 &data_ptr); 2488 &data_ptr);
2207 break; 2489 break;
2208 case QLA82XX_RDMUX: 2490 case QLA8XXX_RDMUX:
2209 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr, 2491 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
2210 &data_ptr); 2492 &data_ptr);
2211 break; 2493 break;
2212 case QLA82XX_QUEUE: 2494 case QLA8XXX_QUEUE:
2213 qla4_8xxx_minidump_process_queue(ha, entry_hdr, 2495 qla4_8xxx_minidump_process_queue(ha, entry_hdr,
2214 &data_ptr); 2496 &data_ptr);
2215 break; 2497 break;
2216 case QLA82XX_RDNOP: 2498 case QLA83XX_POLLRD:
2499 if (!is_qla8032(ha)) {
2500 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2501 break;
2502 }
2503 rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
2504 &data_ptr);
2505 if (rval != QLA_SUCCESS)
2506 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2507 break;
2508 case QLA83XX_RDMUX2:
2509 if (!is_qla8032(ha)) {
2510 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2511 break;
2512 }
2513 qla83xx_minidump_process_rdmux2(ha, entry_hdr,
2514 &data_ptr);
2515 break;
2516 case QLA83XX_POLLRDMWR:
2517 if (!is_qla8032(ha)) {
2518 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2519 break;
2520 }
2521 rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
2522 &data_ptr);
2523 if (rval != QLA_SUCCESS)
2524 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2525 break;
2526 case QLA8XXX_RDNOP:
2217 default: 2527 default:
2218 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i); 2528 qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2219 break; 2529 break;
2220 } 2530 }
2221 2531
@@ -2224,7 +2534,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2224 ha->fw_dump_tmplt_size)); 2534 ha->fw_dump_tmplt_size));
2225skip_nxt_entry: 2535skip_nxt_entry:
2226 /* next entry in the template */ 2536 /* next entry in the template */
2227 entry_hdr = (struct qla82xx_minidump_entry_hdr *) 2537 entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
2228 (((uint8_t *)entry_hdr) + 2538 (((uint8_t *)entry_hdr) +
2229 entry_hdr->entry_size); 2539 entry_hdr->entry_size);
2230 } 2540 }
@@ -2264,33 +2574,45 @@ static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
2264 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp); 2574 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
2265} 2575}
2266 2576
2577void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
2578{
2579 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
2580 !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
2581 if (!qla4_8xxx_collect_md_data(ha)) {
2582 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
2583 set_bit(AF_82XX_FW_DUMPED, &ha->flags);
2584 } else {
2585 ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
2586 __func__);
2587 }
2588 }
2589}
2590
2267/** 2591/**
2268 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw 2592 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
2269 * @ha: pointer to adapter structure 2593 * @ha: pointer to adapter structure
2270 * 2594 *
2271 * Note: IDC lock must be held upon entry 2595 * Note: IDC lock must be held upon entry
2272 **/ 2596 **/
2273static int 2597int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2274qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2275{ 2598{
2276 int rval = QLA_ERROR; 2599 int rval = QLA_ERROR;
2277 int i, timeout; 2600 int i, timeout;
2278 uint32_t old_count, count; 2601 uint32_t old_count, count, idc_ctrl;
2279 int need_reset = 0, peg_stuck = 1; 2602 int need_reset = 0, peg_stuck = 1;
2280 2603
2281 need_reset = qla4_8xxx_need_reset(ha); 2604 need_reset = ha->isp_ops->need_reset(ha);
2282 2605 old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
2283 old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
2284 2606
2285 for (i = 0; i < 10; i++) { 2607 for (i = 0; i < 10; i++) {
2286 timeout = msleep_interruptible(200); 2608 timeout = msleep_interruptible(200);
2287 if (timeout) { 2609 if (timeout) {
2288 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2610 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2289 QLA82XX_DEV_FAILED); 2611 QLA8XXX_DEV_FAILED);
2290 return rval; 2612 return rval;
2291 } 2613 }
2292 2614
2293 count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 2615 count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
2294 if (count != old_count) 2616 if (count != old_count)
2295 peg_stuck = 0; 2617 peg_stuck = 0;
2296 } 2618 }
@@ -2298,13 +2620,13 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2298 if (need_reset) { 2620 if (need_reset) {
2299 /* We are trying to perform a recovery here. */ 2621 /* We are trying to perform a recovery here. */
2300 if (peg_stuck) 2622 if (peg_stuck)
2301 qla4_8xxx_rom_lock_recovery(ha); 2623 ha->isp_ops->rom_lock_recovery(ha);
2302 goto dev_initialize; 2624 goto dev_initialize;
2303 } else { 2625 } else {
2304 /* Start of day for this ha context. */ 2626 /* Start of day for this ha context. */
2305 if (peg_stuck) { 2627 if (peg_stuck) {
2306 /* Either we are the first or recovery in progress. */ 2628 /* Either we are the first or recovery in progress. */
2307 qla4_8xxx_rom_lock_recovery(ha); 2629 ha->isp_ops->rom_lock_recovery(ha);
2308 goto dev_initialize; 2630 goto dev_initialize;
2309 } else { 2631 } else {
2310 /* Firmware already running. */ 2632 /* Firmware already running. */
@@ -2316,46 +2638,53 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
2316dev_initialize: 2638dev_initialize:
2317 /* set to DEV_INITIALIZING */ 2639 /* set to DEV_INITIALIZING */
2318 ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); 2640 ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
2319 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING); 2641 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2642 QLA8XXX_DEV_INITIALIZING);
2320 2643
 2321 /* Driver that sets device state to initializing sets IDC version */ 2644 /*
2322 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); 2645 * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
2323 2646 * device goes to INIT state.
2324 qla4_8xxx_idc_unlock(ha); 2647 */
2325 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) && 2648 if (is_qla8032(ha)) {
2326 !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) { 2649 idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
2327 if (!qla4_8xxx_collect_md_data(ha)) { 2650 if (idc_ctrl & GRACEFUL_RESET_BIT1) {
2328 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP); 2651 qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
2329 } else { 2652 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
2330 ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n"); 2653 set_bit(AF_83XX_NO_FW_DUMP, &ha->flags);
2331 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
2332 } 2654 }
2333 } 2655 }
2334 rval = qla4_8xxx_try_start_fw(ha); 2656
2335 qla4_8xxx_idc_lock(ha); 2657 ha->isp_ops->idc_unlock(ha);
2658
2659 if (is_qla8022(ha))
2660 qla4_8xxx_get_minidump(ha);
2661
2662 rval = ha->isp_ops->restart_firmware(ha);
2663 ha->isp_ops->idc_lock(ha);
2336 2664
2337 if (rval != QLA_SUCCESS) { 2665 if (rval != QLA_SUCCESS) {
2338 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 2666 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
2339 qla4_8xxx_clear_drv_active(ha); 2667 qla4_8xxx_clear_drv_active(ha);
2340 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); 2668 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2669 QLA8XXX_DEV_FAILED);
2341 return rval; 2670 return rval;
2342 } 2671 }
2343 2672
2344dev_ready: 2673dev_ready:
2345 ql4_printk(KERN_INFO, ha, "HW State: READY\n"); 2674 ql4_printk(KERN_INFO, ha, "HW State: READY\n");
2346 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); 2675 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);
2347 2676
2348 return rval; 2677 return rval;
2349} 2678}
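
Bootstrap's liveness test samples the PEG_ALIVE counter ten times, 200 ms apart: any movement means firmware is executing; a flat counter means the pegs are stuck, so the owner runs rom_lock_recovery() before restarting firmware. The heartbeat check in isolation (a sketch; PEG_ALIVE_REG is a placeholder for the table-resolved QLA8XXX_PEG_ALIVE_COUNTER, and the real loop uses msleep_interruptible() so a signal can abort it):

#include <stdint.h>

#define PEG_ALIVE_REG 0x0	/* placeholder, not the real offset */

uint32_t rd32(uint32_t off);
void msleep(unsigned int msecs);

/* Returns 1 if the firmware heartbeat advanced within ~2 s, else 0. */
static int peg_alive(void)
{
	uint32_t old_count = rd32(PEG_ALIVE_REG);
	int i;

	for (i = 0; i < 10; i++) {
		msleep(200);
		if (rd32(PEG_ALIVE_REG) != old_count)
			return 1;	/* counter moved: pegs running */
	}
	return 0;	/* stuck: caller must recover the ROM lock */
}
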
2350 2679
2351/** 2680/**
2352 * qla4_8xxx_need_reset_handler - Code to start reset sequence 2681 * qla4_82xx_need_reset_handler - Code to start reset sequence
2353 * @ha: pointer to adapter structure 2682 * @ha: pointer to adapter structure
2354 * 2683 *
2355 * Note: IDC lock must be held upon entry 2684 * Note: IDC lock must be held upon entry
2356 **/ 2685 **/
2357static void 2686static void
2358qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) 2687qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
2359{ 2688{
2360 uint32_t dev_state, drv_state, drv_active; 2689 uint32_t dev_state, drv_state, drv_active;
2361 uint32_t active_mask = 0xFFFFFFFF; 2690 uint32_t active_mask = 0xFFFFFFFF;
@@ -2365,12 +2694,12 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2365 "Performing ISP error recovery\n"); 2694 "Performing ISP error recovery\n");
2366 2695
2367 if (test_and_clear_bit(AF_ONLINE, &ha->flags)) { 2696 if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
2368 qla4_8xxx_idc_unlock(ha); 2697 qla4_82xx_idc_unlock(ha);
2369 ha->isp_ops->disable_intrs(ha); 2698 ha->isp_ops->disable_intrs(ha);
2370 qla4_8xxx_idc_lock(ha); 2699 qla4_82xx_idc_lock(ha);
2371 } 2700 }
2372 2701
2373 if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) { 2702 if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
2374 DEBUG2(ql4_printk(KERN_INFO, ha, 2703 DEBUG2(ql4_printk(KERN_INFO, ha,
2375 "%s(%ld): reset acknowledged\n", 2704 "%s(%ld): reset acknowledged\n",
2376 __func__, ha->host_no)); 2705 __func__, ha->host_no));
@@ -2382,8 +2711,8 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2382 /* wait for 10 seconds for reset ack from all functions */ 2711 /* wait for 10 seconds for reset ack from all functions */
2383 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 2712 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
2384 2713
2385 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2714 drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2386 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2715 drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2387 2716
2388 ql4_printk(KERN_INFO, ha, 2717 ql4_printk(KERN_INFO, ha,
2389 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2718 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
@@ -2401,31 +2730,31 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2401 * When reset_owner times out, check which functions 2730 * When reset_owner times out, check which functions
2402 * acked/did not ack 2731 * acked/did not ack
2403 */ 2732 */
2404 if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) { 2733 if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
2405 ql4_printk(KERN_INFO, ha, 2734 ql4_printk(KERN_INFO, ha,
2406 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2735 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
2407 __func__, ha->host_no, drv_state, 2736 __func__, ha->host_no, drv_state,
2408 drv_active); 2737 drv_active);
2409 } 2738 }
2410 qla4_8xxx_idc_unlock(ha); 2739 qla4_82xx_idc_unlock(ha);
2411 msleep(1000); 2740 msleep(1000);
2412 qla4_8xxx_idc_lock(ha); 2741 qla4_82xx_idc_lock(ha);
2413 2742
2414 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2743 drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2415 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2744 drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2416 } 2745 }
2417 2746
2418 /* Clear RESET OWNER as we are not going to use it any further */ 2747 /* Clear RESET OWNER as we are not going to use it any further */
2419 clear_bit(AF_82XX_RST_OWNER, &ha->flags); 2748 clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
2420 2749
2421 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2750 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2422 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state, 2751 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
2423 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2752 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
2424 2753
2425 /* Force to DEV_COLD unless someone else is starting a reset */ 2754 /* Force to DEV_COLD unless someone else is starting a reset */
2426 if (dev_state != QLA82XX_DEV_INITIALIZING) { 2755 if (dev_state != QLA8XXX_DEV_INITIALIZING) {
2427 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 2756 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
2428 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 2757 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
2429 qla4_8xxx_set_rst_ready(ha); 2758 qla4_8xxx_set_rst_ready(ha);
2430 } 2759 }
2431} 2760}
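
The wait loop above compares two CAM RAM bitmaps: DRV_ACTIVE advertises which functions are up, and each function sets its bit in DRV_STATE to acknowledge the reset; the owner leaves the loop once every active function has acked, or the 10 s window expires and it proceeds anyway. The completion predicate as a sketch (on ISP82xx both registers use the same 4-bit-per-function encoding, so they compare directly):

#include <stdint.h>

/* True once the ack bitmap covers every active function the owner is
 * waiting on; active_mask narrows the comparison for the owner case. */
static int reset_acked(uint32_t drv_state, uint32_t drv_active,
		       uint32_t active_mask)
{
	return (drv_state & active_mask) == (drv_active & active_mask);
}
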
@@ -2437,9 +2766,104 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
2437void 2766void
2438qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha) 2767qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
2439{ 2768{
2440 qla4_8xxx_idc_lock(ha); 2769 ha->isp_ops->idc_lock(ha);
2441 qla4_8xxx_set_qsnt_ready(ha); 2770 qla4_8xxx_set_qsnt_ready(ha);
2442 qla4_8xxx_idc_unlock(ha); 2771 ha->isp_ops->idc_unlock(ha);
2772}
2773
2774static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
2775{
2776 int idc_ver;
2777 uint32_t drv_active;
2778
2779 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2780 if (drv_active == (1 << (ha->func_num * 4))) {
2781 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
2782 QLA82XX_IDC_VERSION);
2783 ql4_printk(KERN_INFO, ha,
2784 "%s: IDC version updated to %d\n", __func__,
2785 QLA82XX_IDC_VERSION);
2786 } else {
2787 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
2788 if (QLA82XX_IDC_VERSION != idc_ver) {
2789 ql4_printk(KERN_INFO, ha,
2790 "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
2791 __func__, QLA82XX_IDC_VERSION, idc_ver);
2792 }
2793 }
2794}
2795
2796static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
2797{
2798 int idc_ver;
2799 uint32_t drv_active;
2800 int rval = QLA_SUCCESS;
2801
2802 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2803 if (drv_active == (1 << ha->func_num)) {
2804 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
2805 idc_ver &= (~0xFF);
2806 idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
2807 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
2808 ql4_printk(KERN_INFO, ha,
2809 "%s: IDC version updated to %d\n", __func__,
2810 idc_ver);
2811 } else {
2812 idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
2813 idc_ver &= 0xFF;
2814 if (QLA83XX_IDC_VER_MAJ_VALUE != idc_ver) {
2815 ql4_printk(KERN_INFO, ha,
2816 "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
2817 __func__, QLA83XX_IDC_VER_MAJ_VALUE,
2818 idc_ver);
2819 rval = QLA_ERROR;
2820 goto exit_set_idc_ver;
2821 }
2822 }
2823
2824 /* Update IDC_MINOR_VERSION */
2825 idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
2826 idc_ver &= ~(0x03 << (ha->func_num * 2));
2827 idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
2828 qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);
2829
2830exit_set_idc_ver:
2831 return rval;
2832}
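
qla4_83xx_set_idc_ver() splits the IDC version across two registers: the major version occupies the low byte of DRV_IDC_VERSION (written by the first function to load, verified by later ones), while IDC_VER_MINOR packs one 2-bit slot per function at bit offset func_num * 2. The slot update in isolation, with a small worked example:

#include <stdint.h>
#include <assert.h>

/* Install this function's 2-bit minor version without disturbing the
 * other functions' slots. */
static uint32_t set_idc_minor(uint32_t reg, unsigned int func_num,
			      uint32_t minor)
{
	reg &= ~(0x03u << (func_num * 2));		/* clear our slot  */
	reg |= (minor & 0x03u) << (func_num * 2);	/* write new value */
	return reg;
}

int main(void)
{
	/* Function 2 reporting minor version 1: 0xE4 becomes 0xD4. */
	assert(set_idc_minor(0xE4, 2, 1) == 0xD4);
	return 0;
}
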
2833
2834int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
2835{
2836 uint32_t drv_active;
2837 int rval = QLA_SUCCESS;
2838
2839 if (test_bit(AF_INIT_DONE, &ha->flags))
2840 goto exit_update_idc_reg;
2841
2842 ha->isp_ops->idc_lock(ha);
2843 qla4_8xxx_set_drv_active(ha);
2844
2845 /*
2846 * If we are the first driver to load and
2847 * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
2848 */
2849 if (is_qla8032(ha)) {
2850 drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
2851 if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
2852 qla4_83xx_clear_idc_dontreset(ha);
2853 }
2854
2855 if (is_qla8022(ha)) {
2856 qla4_82xx_set_idc_ver(ha);
2857 } else if (is_qla8032(ha)) {
2858 rval = qla4_83xx_set_idc_ver(ha);
2859 if (rval == QLA_ERROR)
2860 qla4_8xxx_clear_drv_active(ha);
2861 }
2862
2863 ha->isp_ops->idc_unlock(ha);
2864
2865exit_update_idc_reg:
2866 return rval;
2443} 2867}
2444 2868
2445/** 2869/**
@@ -2454,13 +2878,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2454 int rval = QLA_SUCCESS; 2878 int rval = QLA_SUCCESS;
2455 unsigned long dev_init_timeout; 2879 unsigned long dev_init_timeout;
2456 2880
2457 if (!test_bit(AF_INIT_DONE, &ha->flags)) { 2881 rval = qla4_8xxx_update_idc_reg(ha);
2458 qla4_8xxx_idc_lock(ha); 2882 if (rval == QLA_ERROR)
2459 qla4_8xxx_set_drv_active(ha); 2883 goto exit_state_handler;
2460 qla4_8xxx_idc_unlock(ha);
2461 }
2462 2884
2463 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2885 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2464 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", 2886 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
2465 dev_state, dev_state < MAX_STATES ? 2887 dev_state, dev_state < MAX_STATES ?
2466 qdev_state[dev_state] : "Unknown")); 2888 qdev_state[dev_state] : "Unknown"));
@@ -2468,7 +2890,7 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2468 /* wait for 30 seconds for device to go ready */ 2890 /* wait for 30 seconds for device to go ready */
2469 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 2891 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
2470 2892
2471 qla4_8xxx_idc_lock(ha); 2893 ha->isp_ops->idc_lock(ha);
2472 while (1) { 2894 while (1) {
2473 2895
2474 if (time_after_eq(jiffies, dev_init_timeout)) { 2896 if (time_after_eq(jiffies, dev_init_timeout)) {
@@ -2477,65 +2899,75 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
2477 DRIVER_NAME, 2899 DRIVER_NAME,
2478 dev_state, dev_state < MAX_STATES ? 2900 dev_state, dev_state < MAX_STATES ?
2479 qdev_state[dev_state] : "Unknown"); 2901 qdev_state[dev_state] : "Unknown");
2480 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2902 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
2481 QLA82XX_DEV_FAILED); 2903 QLA8XXX_DEV_FAILED);
2482 } 2904 }
2483 2905
2484 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2906 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2485 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", 2907 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
2486 dev_state, dev_state < MAX_STATES ? 2908 dev_state, dev_state < MAX_STATES ?
2487 qdev_state[dev_state] : "Unknown"); 2909 qdev_state[dev_state] : "Unknown");
2488 2910
2489 /* NOTE: Make sure idc unlocked upon exit of switch statement */ 2911 /* NOTE: Make sure idc unlocked upon exit of switch statement */
2490 switch (dev_state) { 2912 switch (dev_state) {
2491 case QLA82XX_DEV_READY: 2913 case QLA8XXX_DEV_READY:
2492 goto exit; 2914 goto exit;
2493 case QLA82XX_DEV_COLD: 2915 case QLA8XXX_DEV_COLD:
2494 rval = qla4_8xxx_device_bootstrap(ha); 2916 rval = qla4_8xxx_device_bootstrap(ha);
2495 goto exit; 2917 goto exit;
2496 case QLA82XX_DEV_INITIALIZING: 2918 case QLA8XXX_DEV_INITIALIZING:
2497 qla4_8xxx_idc_unlock(ha); 2919 ha->isp_ops->idc_unlock(ha);
2498 msleep(1000); 2920 msleep(1000);
2499 qla4_8xxx_idc_lock(ha); 2921 ha->isp_ops->idc_lock(ha);
2500 break; 2922 break;
2501 case QLA82XX_DEV_NEED_RESET: 2923 case QLA8XXX_DEV_NEED_RESET:
2502 if (!ql4xdontresethba) { 2924 /*
2503 qla4_8xxx_need_reset_handler(ha); 2925 * For ISP8324, if NEED_RESET is set by any driver,
2504 /* Update timeout value after need 2926 * it should be honored, irrespective of IDC_CTRL
2505 * reset handler */ 2927 * DONTRESET_BIT0
2506 dev_init_timeout = jiffies + 2928 */
2507 (ha->nx_dev_init_timeout * HZ); 2929 if (is_qla8032(ha)) {
2508 } else { 2930 qla4_83xx_need_reset_handler(ha);
2509 qla4_8xxx_idc_unlock(ha); 2931 } else if (is_qla8022(ha)) {
2510 msleep(1000); 2932 if (!ql4xdontresethba) {
2511 qla4_8xxx_idc_lock(ha); 2933 qla4_82xx_need_reset_handler(ha);
2934 /* Update timeout value after need
2935 * reset handler */
2936 dev_init_timeout = jiffies +
2937 (ha->nx_dev_init_timeout * HZ);
2938 } else {
2939 ha->isp_ops->idc_unlock(ha);
2940 msleep(1000);
2941 ha->isp_ops->idc_lock(ha);
2942 }
2512 } 2943 }
2513 break; 2944 break;
2514 case QLA82XX_DEV_NEED_QUIESCENT: 2945 case QLA8XXX_DEV_NEED_QUIESCENT:
2515 /* idc locked/unlocked in handler */ 2946 /* idc locked/unlocked in handler */
2516 qla4_8xxx_need_qsnt_handler(ha); 2947 qla4_8xxx_need_qsnt_handler(ha);
2517 break; 2948 break;
2518 case QLA82XX_DEV_QUIESCENT: 2949 case QLA8XXX_DEV_QUIESCENT:
2519 qla4_8xxx_idc_unlock(ha); 2950 ha->isp_ops->idc_unlock(ha);
2520 msleep(1000); 2951 msleep(1000);
2521 qla4_8xxx_idc_lock(ha); 2952 ha->isp_ops->idc_lock(ha);
2522 break; 2953 break;
2523 case QLA82XX_DEV_FAILED: 2954 case QLA8XXX_DEV_FAILED:
2524 qla4_8xxx_idc_unlock(ha); 2955 ha->isp_ops->idc_unlock(ha);
2525 qla4xxx_dead_adapter_cleanup(ha); 2956 qla4xxx_dead_adapter_cleanup(ha);
2526 rval = QLA_ERROR; 2957 rval = QLA_ERROR;
2527 qla4_8xxx_idc_lock(ha); 2958 ha->isp_ops->idc_lock(ha);
2528 goto exit; 2959 goto exit;
2529 default: 2960 default:
2530 qla4_8xxx_idc_unlock(ha); 2961 ha->isp_ops->idc_unlock(ha);
2531 qla4xxx_dead_adapter_cleanup(ha); 2962 qla4xxx_dead_adapter_cleanup(ha);
2532 rval = QLA_ERROR; 2963 rval = QLA_ERROR;
2533 qla4_8xxx_idc_lock(ha); 2964 ha->isp_ops->idc_lock(ha);
2534 goto exit; 2965 goto exit;
2535 } 2966 }
2536 } 2967 }
2537exit: 2968exit:
2538 qla4_8xxx_idc_unlock(ha); 2969 ha->isp_ops->idc_unlock(ha);
2970exit_state_handler:
2539 return rval; 2971 return rval;
2540} 2972}
2541 2973
@@ -2544,8 +2976,13 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
2544 int retval; 2976 int retval;
2545 2977
2546 /* clear the interrupt */ 2978 /* clear the interrupt */
2547 writel(0, &ha->qla4_8xxx_reg->host_int); 2979 if (is_qla8032(ha)) {
2548 readl(&ha->qla4_8xxx_reg->host_int); 2980 writel(0, &ha->qla4_83xx_reg->risc_intr);
2981 readl(&ha->qla4_83xx_reg->risc_intr);
2982 } else if (is_qla8022(ha)) {
2983 writel(0, &ha->qla4_82xx_reg->host_int);
2984 readl(&ha->qla4_82xx_reg->host_int);
2985 }
2549 2986
2550 retval = qla4_8xxx_device_state_handler(ha); 2987 retval = qla4_8xxx_device_state_handler(ha);
2551 2988
@@ -2579,13 +3016,13 @@ flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
2579} 3016}
2580 3017
2581static uint32_t * 3018static uint32_t *
2582qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr, 3019qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
2583 uint32_t faddr, uint32_t length) 3020 uint32_t faddr, uint32_t length)
2584{ 3021{
2585 uint32_t i; 3022 uint32_t i;
2586 uint32_t val; 3023 uint32_t val;
2587 int loops = 0; 3024 int loops = 0;
2588 while ((qla4_8xxx_rom_lock(ha) != 0) && (loops < 50000)) { 3025 while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
2589 udelay(100); 3026 udelay(100);
2590 cond_resched(); 3027 cond_resched();
2591 loops++; 3028 loops++;
@@ -2597,7 +3034,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
2597 3034
2598 /* Dword reads to flash. */ 3035 /* Dword reads to flash. */
2599 for (i = 0; i < length/4; i++, faddr += 4) { 3036 for (i = 0; i < length/4; i++, faddr += 4) {
2600 if (qla4_8xxx_do_rom_fast_read(ha, faddr, &val)) { 3037 if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
2601 ql4_printk(KERN_WARNING, ha, 3038 ql4_printk(KERN_WARNING, ha,
2602 "Do ROM fast read failed\n"); 3039 "Do ROM fast read failed\n");
2603 goto done_read; 3040 goto done_read;
@@ -2606,7 +3043,7 @@ qla4_8xxx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
2606 } 3043 }
2607 3044
2608done_read: 3045done_read:
2609 qla4_8xxx_rom_unlock(ha); 3046 qla4_82xx_rom_unlock(ha);
2610 return dwptr; 3047 return dwptr;
2611} 3048}
2612 3049
@@ -2614,10 +3051,10 @@ done_read:
2614 * Address and length are byte address 3051 * Address and length are byte address
2615 **/ 3052 **/
2616static uint8_t * 3053static uint8_t *
2617qla4_8xxx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 3054qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2618 uint32_t offset, uint32_t length) 3055 uint32_t offset, uint32_t length)
2619{ 3056{
2620 qla4_8xxx_read_flash_data(ha, (uint32_t *)buf, offset, length); 3057 qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
2621 return buf; 3058 return buf;
2622} 3059}
2623 3060
@@ -2644,7 +3081,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
2644 const char *loc, *locations[] = { "DEF", "FLT" }; 3081 const char *loc, *locations[] = { "DEF", "FLT" };
2645 uint16_t *wptr; 3082 uint16_t *wptr;
2646 uint16_t cnt, chksum; 3083 uint16_t cnt, chksum;
2647 uint32_t start; 3084 uint32_t start, status;
2648 struct qla_flt_header *flt; 3085 struct qla_flt_header *flt;
2649 struct qla_flt_region *region; 3086 struct qla_flt_region *region;
2650 struct ql82xx_hw_data *hw = &ha->hw; 3087 struct ql82xx_hw_data *hw = &ha->hw;
@@ -2653,8 +3090,18 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
2653 wptr = (uint16_t *)ha->request_ring; 3090 wptr = (uint16_t *)ha->request_ring;
2654 flt = (struct qla_flt_header *)ha->request_ring; 3091 flt = (struct qla_flt_header *)ha->request_ring;
2655 region = (struct qla_flt_region *)&flt[1]; 3092 region = (struct qla_flt_region *)&flt[1];
2656 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3093
2657 flt_addr << 2, OPTROM_BURST_SIZE); 3094 if (is_qla8022(ha)) {
3095 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
3096 flt_addr << 2, OPTROM_BURST_SIZE);
3097 } else if (is_qla8032(ha)) {
3098 status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
3099 (uint8_t *)ha->request_ring,
3100 0x400);
3101 if (status != QLA_SUCCESS)
3102 goto no_flash_data;
3103 }
3104
2658 if (*wptr == __constant_cpu_to_le16(0xffff)) 3105 if (*wptr == __constant_cpu_to_le16(0xffff))
2659 goto no_flash_data; 3106 goto no_flash_data;
2660 if (flt->version != __constant_cpu_to_le16(1)) { 3107 if (flt->version != __constant_cpu_to_le16(1)) {
@@ -2730,7 +3177,7 @@ done:
2730} 3177}
2731 3178
2732static void 3179static void
2733qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha) 3180qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
2734{ 3181{
2735#define FLASH_BLK_SIZE_4K 0x1000 3182#define FLASH_BLK_SIZE_4K 0x1000
2736#define FLASH_BLK_SIZE_32K 0x8000 3183#define FLASH_BLK_SIZE_32K 0x8000
@@ -2748,7 +3195,7 @@ qla4_8xxx_get_fdt_info(struct scsi_qla_host *ha)
2748 3195
2749 wptr = (uint16_t *)ha->request_ring; 3196 wptr = (uint16_t *)ha->request_ring;
2750 fdt = (struct qla_fdt_layout *)ha->request_ring; 3197 fdt = (struct qla_fdt_layout *)ha->request_ring;
2751 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3198 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
2752 hw->flt_region_fdt << 2, OPTROM_BURST_SIZE); 3199 hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);
2753 3200
2754 if (*wptr == __constant_cpu_to_le16(0xffff)) 3201 if (*wptr == __constant_cpu_to_le16(0xffff))
@@ -2797,7 +3244,7 @@ done:
2797} 3244}
2798 3245
2799static void 3246static void
2800qla4_8xxx_get_idc_param(struct scsi_qla_host *ha) 3247qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
2801{ 3248{
2802#define QLA82XX_IDC_PARAM_ADDR 0x003e885c 3249#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
2803 uint32_t *wptr; 3250 uint32_t *wptr;
@@ -2805,7 +3252,7 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
2805 if (!is_qla8022(ha)) 3252 if (!is_qla8022(ha))
2806 return; 3253 return;
2807 wptr = (uint32_t *)ha->request_ring; 3254 wptr = (uint32_t *)ha->request_ring;
2808 qla4_8xxx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 3255 qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
2809 QLA82XX_IDC_PARAM_ADDR , 8); 3256 QLA82XX_IDC_PARAM_ADDR , 8);
2810 3257
2811 if (*wptr == __constant_cpu_to_le32(0xffffffff)) { 3258 if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
@@ -2823,6 +3270,39 @@ qla4_8xxx_get_idc_param(struct scsi_qla_host *ha)
2823 return; 3270 return;
2824} 3271}
2825 3272
3273void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
3274 int in_count)
3275{
3276 int i;
3277
3278 /* Load all mailbox registers, except mailbox 0. */
3279 for (i = 1; i < in_count; i++)
3280 writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);
3281
3282 /* Wakeup firmware */
3283 writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
3284 readl(&ha->qla4_82xx_reg->mailbox_in[0]);
3285 writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
3286 readl(&ha->qla4_82xx_reg->hint);
3287}
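
Ordering matters in qla4_82xx_queue_mbox_cmd(): mailboxes 1..n are loaded first and mailbox 0 (the command code) is written last, so the firmware never sees a half-built command; each write is read back to flush PCI posting before the hint doorbell wakes the firmware. The same pattern with hypothetical mmio accessors (the HINT_MBX_INT_PENDING value is passed in rather than assumed):

#include <stdint.h>

void mmio_write32(volatile uint32_t *reg, uint32_t val);	/* hypothetical */
uint32_t mmio_read32(volatile uint32_t *reg);			/* hypothetical */

/* Post an in_count-word mailbox command; mbox 0 must go last. */
static void queue_mbox(volatile uint32_t *mbox_in, volatile uint32_t *hint,
		       uint32_t hint_pending, const uint32_t *cmd, int in_count)
{
	int i;

	for (i = 1; i < in_count; i++)
		mmio_write32(&mbox_in[i], cmd[i]);	/* payload first  */

	mmio_write32(&mbox_in[0], cmd[0]);	/* command word last  */
	mmio_read32(&mbox_in[0]);		/* flush posted write */
	mmio_write32(hint, hint_pending);	/* ring the doorbell  */
	mmio_read32(hint);			/* flush the doorbell */
}
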
3288
3289void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
3290{
3291 int intr_status;
3292
3293 intr_status = readl(&ha->qla4_82xx_reg->host_int);
3294 if (intr_status & ISRX_82XX_RISC_INT) {
3295 ha->mbox_status_count = out_count;
3296 intr_status = readl(&ha->qla4_82xx_reg->host_status);
3297 ha->isp_ops->interrupt_service_routine(ha, intr_status);
3298
3299 if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
3300 test_bit(AF_INTx_ENABLED, &ha->flags))
3301 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
3302 0xfbff);
3303 }
3304}
3305
2826int 3306int
2827qla4_8xxx_get_flash_info(struct scsi_qla_host *ha) 3307qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
2828{ 3308{
@@ -2834,8 +3314,12 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
2834 return ret; 3314 return ret;
2835 3315
2836 qla4_8xxx_get_flt_info(ha, flt_addr); 3316 qla4_8xxx_get_flt_info(ha, flt_addr);
2837 qla4_8xxx_get_fdt_info(ha); 3317 if (is_qla8022(ha)) {
2838 qla4_8xxx_get_idc_param(ha); 3318 qla4_82xx_get_fdt_info(ha);
3319 qla4_82xx_get_idc_param(ha);
3320 } else if (is_qla8032(ha)) {
3321 qla4_83xx_get_idc_param(ha);
3322 }
2839 3323
2840 return QLA_SUCCESS; 3324 return QLA_SUCCESS;
2841} 3325}
@@ -2869,36 +3353,36 @@ qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
2869} 3353}
2870 3354
2871/** 3355/**
2872 * qla4_8xxx_isp_reset - Resets ISP and aborts all outstanding commands. 3356 * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
2873 * @ha: pointer to host adapter structure. 3357 * @ha: pointer to host adapter structure.
2874 **/ 3358 **/
2875int 3359int
2876qla4_8xxx_isp_reset(struct scsi_qla_host *ha) 3360qla4_82xx_isp_reset(struct scsi_qla_host *ha)
2877{ 3361{
2878 int rval; 3362 int rval;
2879 uint32_t dev_state; 3363 uint32_t dev_state;
2880 3364
2881 qla4_8xxx_idc_lock(ha); 3365 qla4_82xx_idc_lock(ha);
2882 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3366 dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
2883 3367
2884 if (dev_state == QLA82XX_DEV_READY) { 3368 if (dev_state == QLA8XXX_DEV_READY) {
2885 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 3369 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
2886 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3370 qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2887 QLA82XX_DEV_NEED_RESET); 3371 QLA8XXX_DEV_NEED_RESET);
2888 set_bit(AF_82XX_RST_OWNER, &ha->flags); 3372 set_bit(AF_8XXX_RST_OWNER, &ha->flags);
2889 } else 3373 } else
2890 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); 3374 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
2891 3375
2892 qla4_8xxx_idc_unlock(ha); 3376 qla4_82xx_idc_unlock(ha);
2893 3377
2894 rval = qla4_8xxx_device_state_handler(ha); 3378 rval = qla4_8xxx_device_state_handler(ha);
2895 3379
2896 qla4_8xxx_idc_lock(ha); 3380 qla4_82xx_idc_lock(ha);
2897 qla4_8xxx_clear_rst_ready(ha); 3381 qla4_8xxx_clear_rst_ready(ha);
2898 qla4_8xxx_idc_unlock(ha); 3382 qla4_82xx_idc_unlock(ha);
2899 3383
2900 if (rval == QLA_SUCCESS) { 3384 if (rval == QLA_SUCCESS) {
2901 ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n"); 3385 ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n");
2902 clear_bit(AF_FW_RECOVERY, &ha->flags); 3386 clear_bit(AF_FW_RECOVERY, &ha->flags);
2903 } 3387 }
2904 3388
@@ -2979,8 +3463,7 @@ exit_validate_mac82:
2979 3463
2980/* Interrupt handling helpers. */ 3464/* Interrupt handling helpers. */
2981 3465
2982static int 3466int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
2983qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
2984{ 3467{
2985 uint32_t mbox_cmd[MBOX_REG_COUNT]; 3468 uint32_t mbox_cmd[MBOX_REG_COUNT];
2986 uint32_t mbox_sts[MBOX_REG_COUNT]; 3469 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3001,8 +3484,7 @@ qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
3001 return QLA_SUCCESS; 3484 return QLA_SUCCESS;
3002} 3485}
3003 3486
3004static int 3487int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3005qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3006{ 3488{
3007 uint32_t mbox_cmd[MBOX_REG_COUNT]; 3489 uint32_t mbox_cmd[MBOX_REG_COUNT];
3008 uint32_t mbox_sts[MBOX_REG_COUNT]; 3490 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3025,26 +3507,26 @@ qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
3025} 3507}
3026 3508
3027void 3509void
3028qla4_8xxx_enable_intrs(struct scsi_qla_host *ha) 3510qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
3029{ 3511{
3030 qla4_8xxx_mbx_intr_enable(ha); 3512 qla4_8xxx_mbx_intr_enable(ha);
3031 3513
3032 spin_lock_irq(&ha->hardware_lock); 3514 spin_lock_irq(&ha->hardware_lock);
3033 /* BIT 10 - reset */ 3515 /* BIT 10 - reset */
3034 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); 3516 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
3035 spin_unlock_irq(&ha->hardware_lock); 3517 spin_unlock_irq(&ha->hardware_lock);
3036 set_bit(AF_INTERRUPTS_ON, &ha->flags); 3518 set_bit(AF_INTERRUPTS_ON, &ha->flags);
3037} 3519}
3038 3520
3039void 3521void
3040qla4_8xxx_disable_intrs(struct scsi_qla_host *ha) 3522qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
3041{ 3523{
3042 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags)) 3524 if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
3043 qla4_8xxx_mbx_intr_disable(ha); 3525 qla4_8xxx_mbx_intr_disable(ha);
3044 3526
3045 spin_lock_irq(&ha->hardware_lock); 3527 spin_lock_irq(&ha->hardware_lock);
3046 /* BIT 10 - set */ 3528 /* BIT 10 - set */
3047 qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); 3529 qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
3048 spin_unlock_irq(&ha->hardware_lock); 3530 spin_unlock_irq(&ha->hardware_lock);
3049} 3531}
3050 3532
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 30258479f10..9dc0bbfe50d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -25,6 +25,8 @@
25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 25#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 26#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4) 27#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
28#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
29#define CRB_CMDPEG_CHECK_DELAY 500
28 30
29#define qla82xx_get_temp_val(x) ((x) >> 16) 31#define qla82xx_get_temp_val(x) ((x) >> 16)
30#define qla82xx_get_temp_state(x) ((x) & 0xffff) 32#define qla82xx_get_temp_state(x) ((x) & 0xffff)
@@ -490,8 +492,8 @@ enum {
490 * Base addresses of major components on-chip. 492 * Base addresses of major components on-chip.
491 * ====================== BASE ADDRESSES ON-CHIP ====================== 493 * ====================== BASE ADDRESSES ON-CHIP ======================
492 */ 494 */
493#define QLA82XX_ADDR_DDR_NET (0x0000000000000000ULL) 495#define QLA8XXX_ADDR_DDR_NET (0x0000000000000000ULL)
494#define QLA82XX_ADDR_DDR_NET_MAX (0x000000000fffffffULL) 496#define QLA8XXX_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
495 497
496/* Imbus address bit used to indicate a host address. This bit is 498/* Imbus address bit used to indicate a host address. This bit is
497 * eliminated by the pcie bar and bar select before presentation 499 * eliminated by the pcie bar and bar select before presentation
@@ -500,14 +502,15 @@ enum {
500#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL) 502#define QLA82XX_P2_ADDR_PCIE (0x0000000800000000ULL)
501#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL) 503#define QLA82XX_P3_ADDR_PCIE (0x0000008000000000ULL)
502#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) 504#define QLA82XX_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
503#define QLA82XX_ADDR_OCM0 (0x0000000200000000ULL) 505#define QLA8XXX_ADDR_OCM0 (0x0000000200000000ULL)
504#define QLA82XX_ADDR_OCM0_MAX (0x00000002000fffffULL) 506#define QLA8XXX_ADDR_OCM0_MAX (0x00000002000fffffULL)
505#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) 507#define QLA8XXX_ADDR_OCM1 (0x0000000200400000ULL)
506#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) 508#define QLA8XXX_ADDR_OCM1_MAX (0x00000002004fffffULL)
507#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) 509#define QLA8XXX_ADDR_QDR_NET (0x0000000300000000ULL)
508 510
509#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) 511#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
510#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) 512#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
513#define QLA8XXX_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
511 514
512#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 515#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
513#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000 516#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
@@ -517,6 +520,10 @@ enum {
517#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000 520#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
518#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff 521#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
519 522
523/* PCI Windowing for DDR regions. */
524#define QLA8XXX_ADDR_IN_RANGE(addr, low, high) \
525 (((addr) <= (high)) && ((addr) >= (low)))
526
520/* 527/*
521 * Register offsets for MN 528 * Register offsets for MN
522 */ 529 */
@@ -540,6 +547,11 @@ enum {
540#define MIU_TA_CTL_WRITE 4 547#define MIU_TA_CTL_WRITE 4
541#define MIU_TA_CTL_BUSY 8 548#define MIU_TA_CTL_BUSY 8
542 549
550#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
551#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE |\
552 MIU_TA_CTL_START)
553#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
554
543/*CAM RAM */ 555/*CAM RAM */
544# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) 556# define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000)
545# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) 557# define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg))
@@ -565,20 +577,53 @@ enum {
565/* Driver Coexistence Defines */ 577/* Driver Coexistence Defines */
566#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138)) 578#define QLA82XX_CRB_DRV_ACTIVE (QLA82XX_CAM_RAM(0x138))
567#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140)) 579#define QLA82XX_CRB_DEV_STATE (QLA82XX_CAM_RAM(0x140))
568#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
569#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
570#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144)) 580#define QLA82XX_CRB_DRV_STATE (QLA82XX_CAM_RAM(0x144))
571#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148)) 581#define QLA82XX_CRB_DRV_SCRATCH (QLA82XX_CAM_RAM(0x148))
572#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c)) 582#define QLA82XX_CRB_DEV_PART_INFO (QLA82XX_CAM_RAM(0x14c))
583#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
584
585enum qla_regs {
586 QLA8XXX_PEG_HALT_STATUS1 = 0,
587 QLA8XXX_PEG_HALT_STATUS2,
588 QLA8XXX_PEG_ALIVE_COUNTER,
589 QLA8XXX_CRB_DRV_ACTIVE,
590 QLA8XXX_CRB_DEV_STATE,
591 QLA8XXX_CRB_DRV_STATE,
592 QLA8XXX_CRB_DRV_SCRATCH,
593 QLA8XXX_CRB_DEV_PART_INFO,
594 QLA8XXX_CRB_DRV_IDC_VERSION,
595 QLA8XXX_FW_VERSION_MAJOR,
596 QLA8XXX_FW_VERSION_MINOR,
597 QLA8XXX_FW_VERSION_SUB,
598 QLA8XXX_CRB_CMDPEG_STATE,
599 QLA8XXX_CRB_TEMP_STATE,
600};
601
602static const uint32_t qla4_82xx_reg_tbl[] = {
603 QLA82XX_PEG_HALT_STATUS1,
604 QLA82XX_PEG_HALT_STATUS2,
605 QLA82XX_PEG_ALIVE_COUNTER,
606 QLA82XX_CRB_DRV_ACTIVE,
607 QLA82XX_CRB_DEV_STATE,
608 QLA82XX_CRB_DRV_STATE,
609 QLA82XX_CRB_DRV_SCRATCH,
610 QLA82XX_CRB_DEV_PART_INFO,
611 QLA82XX_CRB_DRV_IDC_VERSION,
612 QLA82XX_FW_VERSION_MAJOR,
613 QLA82XX_FW_VERSION_MINOR,
614 QLA82XX_FW_VERSION_SUB,
615 CRB_CMDPEG_STATE,
616 CRB_TEMP_STATE,
617};
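
The enum/table pair above is what lets the shared qla4_8xxx_* code call qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) without knowing the chip: each ISP variant supplies its own uint32_t table indexed by the enum, and the accessor resolves the logical register to a hardware offset at run time. A minimal model of that indirection, with placeholder offsets and a hypothetical rd32() backend:

#include <stdint.h>

enum logical_reg {		/* abridged from the enum above */
	CRB_DRV_ACTIVE = 0,
	CRB_DEV_STATE,
	LOGICAL_REG_MAX,
};

/* Per-chip offset tables; values are placeholders, not the real
 * QLA82XX/QLA83XX offsets. */
static const uint32_t reg_tbl_82xx[LOGICAL_REG_MAX] = { 0x1000, 0x1004 };
static const uint32_t reg_tbl_83xx[LOGICAL_REG_MAX] = { 0x2000, 0x2004 };

uint32_t rd32(uint32_t off);	/* hypothetical hardware accessor */

struct host {
	const uint32_t *reg_tbl;	/* chosen once at probe time */
};

/* Chip-agnostic read: translate logical index to hardware offset. */
static uint32_t rd_direct(struct host *ha, enum logical_reg idx)
{
	return rd32(ha->reg_tbl[idx]);
}
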
573 618
574/* Every driver should use these Device State */ 619/* Every driver should use these Device State */
575#define QLA82XX_DEV_COLD 1 620#define QLA8XXX_DEV_COLD 1
576#define QLA82XX_DEV_INITIALIZING 2 621#define QLA8XXX_DEV_INITIALIZING 2
577#define QLA82XX_DEV_READY 3 622#define QLA8XXX_DEV_READY 3
578#define QLA82XX_DEV_NEED_RESET 4 623#define QLA8XXX_DEV_NEED_RESET 4
579#define QLA82XX_DEV_NEED_QUIESCENT 5 624#define QLA8XXX_DEV_NEED_QUIESCENT 5
580#define QLA82XX_DEV_FAILED 6 625#define QLA8XXX_DEV_FAILED 6
581#define QLA82XX_DEV_QUIESCENT 7 626#define QLA8XXX_DEV_QUIESCENT 7
582#define MAX_STATES 8 /* Increment if new state added */ 627#define MAX_STATES 8 /* Increment if new state added */
583 628
584#define QLA82XX_IDC_VERSION 0x1 629#define QLA82XX_IDC_VERSION 0x1
@@ -795,47 +840,51 @@ struct crb_addr_pair {
795/* Minidump related */ 840/* Minidump related */
796 841
797/* Entry Type Defines */ 842/* Entry Type Defines */
798#define QLA82XX_RDNOP 0 843#define QLA8XXX_RDNOP 0
799#define QLA82XX_RDCRB 1 844#define QLA8XXX_RDCRB 1
800#define QLA82XX_RDMUX 2 845#define QLA8XXX_RDMUX 2
801#define QLA82XX_QUEUE 3 846#define QLA8XXX_QUEUE 3
802#define QLA82XX_BOARD 4 847#define QLA8XXX_BOARD 4
803#define QLA82XX_RDOCM 6 848#define QLA8XXX_RDOCM 6
804#define QLA82XX_PREGS 7 849#define QLA8XXX_PREGS 7
805#define QLA82XX_L1DTG 8 850#define QLA8XXX_L1DTG 8
806#define QLA82XX_L1ITG 9 851#define QLA8XXX_L1ITG 9
807#define QLA82XX_L1DAT 11 852#define QLA8XXX_L1DAT 11
808#define QLA82XX_L1INS 12 853#define QLA8XXX_L1INS 12
809#define QLA82XX_L2DTG 21 854#define QLA8XXX_L2DTG 21
810#define QLA82XX_L2ITG 22 855#define QLA8XXX_L2ITG 22
811#define QLA82XX_L2DAT 23 856#define QLA8XXX_L2DAT 23
812#define QLA82XX_L2INS 24 857#define QLA8XXX_L2INS 24
813#define QLA82XX_RDROM 71 858#define QLA83XX_POLLRD 35
814#define QLA82XX_RDMEM 72 859#define QLA83XX_RDMUX2 36
815#define QLA82XX_CNTRL 98 860#define QLA83XX_POLLRDMWR 37
816#define QLA82XX_RDEND 255 861#define QLA8XXX_RDROM 71
862#define QLA8XXX_RDMEM 72
863#define QLA8XXX_CNTRL 98
864#define QLA83XX_TLHDR 99
865#define QLA8XXX_RDEND 255
817 866
818/* Opcodes for Control Entries. 867/* Opcodes for Control Entries.
819 * These Flags are bit fields. 868 * These Flags are bit fields.
820 */ 869 */
821#define QLA82XX_DBG_OPCODE_WR 0x01 870#define QLA8XXX_DBG_OPCODE_WR 0x01
822#define QLA82XX_DBG_OPCODE_RW 0x02 871#define QLA8XXX_DBG_OPCODE_RW 0x02
823#define QLA82XX_DBG_OPCODE_AND 0x04 872#define QLA8XXX_DBG_OPCODE_AND 0x04
824#define QLA82XX_DBG_OPCODE_OR 0x08 873#define QLA8XXX_DBG_OPCODE_OR 0x08
825#define QLA82XX_DBG_OPCODE_POLL 0x10 874#define QLA8XXX_DBG_OPCODE_POLL 0x10
826#define QLA82XX_DBG_OPCODE_RDSTATE 0x20 875#define QLA8XXX_DBG_OPCODE_RDSTATE 0x20
827#define QLA82XX_DBG_OPCODE_WRSTATE 0x40 876#define QLA8XXX_DBG_OPCODE_WRSTATE 0x40
828#define QLA82XX_DBG_OPCODE_MDSTATE 0x80 877#define QLA8XXX_DBG_OPCODE_MDSTATE 0x80
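
These opcode bits drive the minidump template walk: a single control entry can request several operations against one CRB address. A hedged sketch of decoding two of the bits (the helper name, parameter shapes, and the indirect-access signatures are assumptions for illustration; the AND/OR/POLL and state opcodes follow the same pattern):

	static void md_apply_ctrl_opcode(struct scsi_qla_host *ha,
					 uint32_t opcode, uint32_t addr,
					 uint32_t value)
	{
		uint32_t data;

		if (opcode & QLA8XXX_DBG_OPCODE_WR)
			ha->isp_ops->wr_reg_indirect(ha, addr, value);

		if (opcode & QLA8XXX_DBG_OPCODE_RW) {
			/* read the register back and rewrite it */
			ha->isp_ops->rd_reg_indirect(ha, addr, &data);
			ha->isp_ops->wr_reg_indirect(ha, addr, data);
		}
	}
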
829 878
830/* Driver Flags */ 879/* Driver Flags */
831#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ 880#define QLA8XXX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
832#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size 881#define QLA8XXX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
833 * mismatch */ 882 * mismatch */
834 883
835/* Driver_code is for the driver to write some info about the entry 884/* Driver_code is for the driver to write some info about the entry
836 * currently not used. 885 * currently not used.
837 */ 886 */
838struct qla82xx_minidump_entry_hdr { 887struct qla8xxx_minidump_entry_hdr {
839 uint32_t entry_type; 888 uint32_t entry_type;
840 uint32_t entry_size; 889 uint32_t entry_size;
841 uint32_t entry_capture_size; 890 uint32_t entry_capture_size;
@@ -848,8 +897,8 @@ struct qla82xx_minidump_entry_hdr {
848}; 897};
849 898
850/* Read CRB entry header */ 899/* Read CRB entry header */
851struct qla82xx_minidump_entry_crb { 900struct qla8xxx_minidump_entry_crb {
852 struct qla82xx_minidump_entry_hdr h; 901 struct qla8xxx_minidump_entry_hdr h;
853 uint32_t addr; 902 uint32_t addr;
854 struct { 903 struct {
855 uint8_t addr_stride; 904 uint8_t addr_stride;
@@ -871,8 +920,8 @@ struct qla82xx_minidump_entry_crb {
871 uint32_t value_3; 920 uint32_t value_3;
872}; 921};
873 922
874struct qla82xx_minidump_entry_cache { 923struct qla8xxx_minidump_entry_cache {
875 struct qla82xx_minidump_entry_hdr h; 924 struct qla8xxx_minidump_entry_hdr h;
876 uint32_t tag_reg_addr; 925 uint32_t tag_reg_addr;
877 struct { 926 struct {
878 uint16_t tag_value_stride; 927 uint16_t tag_value_stride;
@@ -895,8 +944,8 @@ struct qla82xx_minidump_entry_cache {
895}; 944};
896 945
897/* Read OCM */ 946/* Read OCM */
898struct qla82xx_minidump_entry_rdocm { 947struct qla8xxx_minidump_entry_rdocm {
899 struct qla82xx_minidump_entry_hdr h; 948 struct qla8xxx_minidump_entry_hdr h;
900 uint32_t rsvd_0; 949 uint32_t rsvd_0;
901 uint32_t rsvd_1; 950 uint32_t rsvd_1;
902 uint32_t data_size; 951 uint32_t data_size;
@@ -908,24 +957,24 @@ struct qla82xx_minidump_entry_rdocm {
908}; 957};
909 958
910/* Read Memory */ 959/* Read Memory */
911struct qla82xx_minidump_entry_rdmem { 960struct qla8xxx_minidump_entry_rdmem {
912 struct qla82xx_minidump_entry_hdr h; 961 struct qla8xxx_minidump_entry_hdr h;
913 uint32_t rsvd[6]; 962 uint32_t rsvd[6];
914 uint32_t read_addr; 963 uint32_t read_addr;
915 uint32_t read_data_size; 964 uint32_t read_data_size;
916}; 965};
917 966
918/* Read ROM */ 967/* Read ROM */
919struct qla82xx_minidump_entry_rdrom { 968struct qla8xxx_minidump_entry_rdrom {
920 struct qla82xx_minidump_entry_hdr h; 969 struct qla8xxx_minidump_entry_hdr h;
921 uint32_t rsvd[6]; 970 uint32_t rsvd[6];
922 uint32_t read_addr; 971 uint32_t read_addr;
923 uint32_t read_data_size; 972 uint32_t read_data_size;
924}; 973};
925 974
926/* Mux entry */ 975/* Mux entry */
927struct qla82xx_minidump_entry_mux { 976struct qla8xxx_minidump_entry_mux {
928 struct qla82xx_minidump_entry_hdr h; 977 struct qla8xxx_minidump_entry_hdr h;
929 uint32_t select_addr; 978 uint32_t select_addr;
930 uint32_t rsvd_0; 979 uint32_t rsvd_0;
931 uint32_t data_size; 980 uint32_t data_size;
@@ -937,8 +986,8 @@ struct qla82xx_minidump_entry_mux {
937}; 986};
938 987
939/* Queue entry */ 988/* Queue entry */
940struct qla82xx_minidump_entry_queue { 989struct qla8xxx_minidump_entry_queue {
941 struct qla82xx_minidump_entry_hdr h; 990 struct qla8xxx_minidump_entry_hdr h;
942 uint32_t select_addr; 991 uint32_t select_addr;
943 struct { 992 struct {
944 uint16_t queue_id_stride; 993 uint16_t queue_id_stride;
@@ -956,23 +1005,6 @@ struct qla82xx_minidump_entry_queue {
956 } rd_strd; 1005 } rd_strd;
957}; 1006};
958 1007
959#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
960#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
961#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
962#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
963#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
964#define QLA82XX_MINIDUMP_MEM_SIZE 0
965#define QLA82XX_MAX_ENTRY_HDR 4
966
967struct qla82xx_minidump {
968 uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
969 uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
970 uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
971 uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
972 uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
973 uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
974};
975
976#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 1008#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
977#define RQST_TMPLT_SIZE 0x0 1009#define RQST_TMPLT_SIZE 0x0
978#define RQST_TMPLT 0x1 1010#define RQST_TMPLT 0x1
@@ -982,6 +1014,16 @@ struct qla82xx_minidump {
982#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 1014#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
983#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 1015#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
984 1016
1017#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
1018#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
1019#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
1020#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
1021
1022#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
1023#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
1024#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
1025#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
1026
985static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 1027static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
986 0x410000AC, 0x410000B8, 0x410000BC }; 1028 0x410000AC, 0x410000B8, 0x410000BC };
987#endif 1029#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 79243b76d17..fbc546e893a 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
@@ -18,6 +18,7 @@
18#include "ql4_glbl.h" 18#include "ql4_glbl.h"
19#include "ql4_dbg.h" 19#include "ql4_dbg.h"
20#include "ql4_inline.h" 20#include "ql4_inline.h"
21#include "ql4_83xx.h"
21 22
22/* 23/*
23 * Driver version 24 * Driver version
@@ -160,7 +161,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
160static int qla4xxx_slave_alloc(struct scsi_device *device); 161static int qla4xxx_slave_alloc(struct scsi_device *device);
161static int qla4xxx_slave_configure(struct scsi_device *device); 162static int qla4xxx_slave_configure(struct scsi_device *device);
162static void qla4xxx_slave_destroy(struct scsi_device *sdev); 163static void qla4xxx_slave_destroy(struct scsi_device *sdev);
163static umode_t ql4_attr_is_visible(int param_type, int param); 164static umode_t qla4_attr_is_visible(int param_type, int param);
164static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 165static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, 166static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 int reason); 167 int reason);
@@ -203,7 +204,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
203 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | 204 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
204 CAP_DATADGST | CAP_LOGIN_OFFLOAD | 205 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
205 CAP_MULTI_R2T, 206 CAP_MULTI_R2T,
206 .attr_is_visible = ql4_attr_is_visible, 207 .attr_is_visible = qla4_attr_is_visible,
207 .create_session = qla4xxx_session_create, 208 .create_session = qla4xxx_session_create,
208 .destroy_session = qla4xxx_session_destroy, 209 .destroy_session = qla4xxx_session_destroy,
209 .start_conn = qla4xxx_conn_start, 210 .start_conn = qla4xxx_conn_start,
@@ -315,7 +316,7 @@ exit_send_ping:
315 return rval; 316 return rval;
316} 317}
317 318
318static umode_t ql4_attr_is_visible(int param_type, int param) 319static umode_t qla4_attr_is_visible(int param_type, int param)
319{ 320{
320 switch (param_type) { 321 switch (param_type) {
321 case ISCSI_HOST_PARAM: 322 case ISCSI_HOST_PARAM:
@@ -1366,7 +1367,7 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
1366 1367
1367 conn = cls_conn->dd_data; 1368 conn = cls_conn->dd_data;
1368 qla_conn = conn->dd_data; 1369 qla_conn = conn->dd_data;
1369 dst_addr = &qla_conn->qla_ep->dst_addr; 1370 dst_addr = (struct sockaddr *)&qla_conn->qla_ep->dst_addr;
1370 1371
1371 switch (param) { 1372 switch (param) {
1372 case ISCSI_PARAM_CONN_PORT: 1373 case ISCSI_PARAM_CONN_PORT:
@@ -2315,8 +2316,17 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2315 if (ha->nx_pcibase) 2316 if (ha->nx_pcibase)
2316 iounmap( 2317 iounmap(
2317 (struct device_reg_82xx __iomem *)ha->nx_pcibase); 2318 (struct device_reg_82xx __iomem *)ha->nx_pcibase);
2318 } else if (ha->reg) 2319 } else if (is_qla8032(ha)) {
2320 if (ha->nx_pcibase)
2321 iounmap(
2322 (struct device_reg_83xx __iomem *)ha->nx_pcibase);
2323 } else if (ha->reg) {
2319 iounmap(ha->reg); 2324 iounmap(ha->reg);
2325 }
2326
2327 if (ha->reset_tmplt.buff)
2328 vfree(ha->reset_tmplt.buff);
2329
2320 pci_release_regions(ha->pdev); 2330 pci_release_regions(ha->pdev);
2321} 2331}
2322 2332
@@ -2420,7 +2430,7 @@ static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
2420 uint32_t temp, temp_state, temp_val; 2430 uint32_t temp, temp_state, temp_val;
2421 int status = QLA_SUCCESS; 2431 int status = QLA_SUCCESS;
2422 2432
2423 temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE); 2433 temp = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_TEMP_STATE);
2424 2434
2425 temp_state = qla82xx_get_temp_state(temp); 2435 temp_state = qla82xx_get_temp_state(temp);
2426 temp_val = qla82xx_get_temp_val(temp); 2436 temp_val = qla82xx_get_temp_val(temp);
@@ -2456,7 +2466,8 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2456 uint32_t fw_heartbeat_counter; 2466 uint32_t fw_heartbeat_counter;
2457 int status = QLA_SUCCESS; 2467 int status = QLA_SUCCESS;
2458 2468
2459 fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); 2469 fw_heartbeat_counter = qla4_8xxx_rd_direct(ha,
2470 QLA8XXX_PEG_ALIVE_COUNTER);
2460 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ 2471 /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
2461 if (fw_heartbeat_counter == 0xffffffff) { 2472 if (fw_heartbeat_counter == 0xffffffff) {
2462 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen " 2473 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
@@ -2470,28 +2481,7 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2470 /* FW not alive after 2 seconds */ 2481 /* FW not alive after 2 seconds */
2471 if (ha->seconds_since_last_heartbeat == 2) { 2482 if (ha->seconds_since_last_heartbeat == 2) {
2472 ha->seconds_since_last_heartbeat = 0; 2483 ha->seconds_since_last_heartbeat = 0;
2473 2484 qla4_8xxx_dump_peg_reg(ha);
2474 ql4_printk(KERN_INFO, ha,
2475 "scsi(%ld): %s, Dumping hw/fw registers:\n "
2476 " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2:"
2477 " 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
2478 " 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
2479 " 0x%x,\n PEG_NET_4_PC: 0x%x\n",
2480 ha->host_no, __func__,
2481 qla4_8xxx_rd_32(ha,
2482 QLA82XX_PEG_HALT_STATUS1),
2483 qla4_8xxx_rd_32(ha,
2484 QLA82XX_PEG_HALT_STATUS2),
2485 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
2486 0x3c),
2487 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 +
2488 0x3c),
2489 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 +
2490 0x3c),
2491 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 +
2492 0x3c),
2493 qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
2494 0x3c));
2495 status = QLA_ERROR; 2485 status = QLA_ERROR;
2496 } 2486 }
2497 } else 2487 } else
@@ -2501,6 +2491,48 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2501 return status; 2491 return status;
2502} 2492}
2503 2493
2494static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
2495{
2496 uint32_t halt_status;
2497 int halt_status_unrecoverable = 0;
2498
2499 halt_status = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_HALT_STATUS1);
2500
2501 if (is_qla8022(ha)) {
2502 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2503 __func__);
2504 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2505 CRB_NIU_XG_PAUSE_CTL_P0 |
2506 CRB_NIU_XG_PAUSE_CTL_P1);
2507
2508 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2509 ql4_printk(KERN_ERR, ha, "%s: Firmware aborted with error code 0x00006700. Device is being reset\n",
2510 __func__);
2511 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2512 halt_status_unrecoverable = 1;
2513 } else if (is_qla8032(ha)) {
2514 if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
2515 ql4_printk(KERN_ERR, ha, "%s: Firmware error detected, device is being reset\n",
2516 __func__);
2517 else if (halt_status & QLA83XX_HALT_STATUS_UNRECOVERABLE)
2518 halt_status_unrecoverable = 1;
2519 }
2520
2521 /*
2522 * Since we cannot change dev_state in interrupt context,
2523 * set the appropriate DPC flag, then wake up the DPC
2524 */
2525 if (halt_status_unrecoverable) {
2526 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 } else {
2528 ql4_printk(KERN_INFO, ha, "%s: detect abort needed!\n",
2529 __func__);
2530 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2531 }
2532 qla4xxx_mailbox_premature_completion(ha);
2533 qla4xxx_wake_dpc(ha);
2534}
2535
2504/** 2536/**
2505 * qla4_8xxx_watchdog - Poll dev state 2537 * qla4_8xxx_watchdog - Poll dev state
2506 * @ha: Pointer to host adapter structure. 2538 * @ha: Pointer to host adapter structure.
@@ -2509,31 +2541,33 @@ static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
2509 **/ 2541 **/
2510void qla4_8xxx_watchdog(struct scsi_qla_host *ha) 2542void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2511{ 2543{
2512 uint32_t dev_state, halt_status; 2544 uint32_t dev_state;
2513 2545
2514 /* don't poll if reset is going on */ 2546 /* don't poll if reset is going on */
2515 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || 2547 if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
2516 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 2548 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
2517 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) { 2549 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
2518 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2550 dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
2519 2551
2520 if (qla4_8xxx_check_temp(ha)) { 2552 if (qla4_8xxx_check_temp(ha)) {
2521 ql4_printk(KERN_INFO, ha, "disabling pause" 2553 if (is_qla8022(ha)) {
2522 " transmit on port 0 & 1.\n"); 2554 ql4_printk(KERN_INFO, ha, "disabling pause transmit on port 0 & 1.\n");
2523 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, 2555 qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2524 CRB_NIU_XG_PAUSE_CTL_P0 | 2556 CRB_NIU_XG_PAUSE_CTL_P0 |
2525 CRB_NIU_XG_PAUSE_CTL_P1); 2557 CRB_NIU_XG_PAUSE_CTL_P1);
2558 }
2526 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags); 2559 set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
2527 qla4xxx_wake_dpc(ha); 2560 qla4xxx_wake_dpc(ha);
2528 } else if (dev_state == QLA82XX_DEV_NEED_RESET && 2561 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2529 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 2562 !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
2530 if (!ql4xdontresethba) { 2563 if (is_qla8032(ha) ||
2564 (is_qla8022(ha) && !ql4xdontresethba)) {
2531 ql4_printk(KERN_INFO, ha, "%s: HW State: " 2565 ql4_printk(KERN_INFO, ha, "%s: HW State: "
2532 "NEED RESET!\n", __func__); 2566 "NEED RESET!\n", __func__);
2533 set_bit(DPC_RESET_HA, &ha->dpc_flags); 2567 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2534 qla4xxx_wake_dpc(ha); 2568 qla4xxx_wake_dpc(ha);
2535 } 2569 }
2536 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 2570 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2537 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 2571 !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
2538 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", 2572 ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n",
2539 __func__); 2573 __func__);
@@ -2541,36 +2575,8 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2541 qla4xxx_wake_dpc(ha); 2575 qla4xxx_wake_dpc(ha);
2542 } else { 2576 } else {
2543 /* Check firmware health */ 2577 /* Check firmware health */
2544 if (qla4_8xxx_check_fw_alive(ha)) { 2578 if (qla4_8xxx_check_fw_alive(ha))
2545 ql4_printk(KERN_INFO, ha, "disabling pause" 2579 qla4_8xxx_process_fw_error(ha);
2546 " transmit on port 0 & 1.\n");
2547 qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
2548 CRB_NIU_XG_PAUSE_CTL_P0 |
2549 CRB_NIU_XG_PAUSE_CTL_P1);
2550 halt_status = qla4_8xxx_rd_32(ha,
2551 QLA82XX_PEG_HALT_STATUS1);
2552
2553 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2554 ql4_printk(KERN_ERR, ha, "%s:"
2555 " Firmware aborted with"
2556 " error code 0x00006700."
2557 " Device is being reset\n",
2558 __func__);
2559
2560 /* Since we cannot change dev_state in interrupt
2561 * context, set appropriate DPC flag then wakeup
2562 * DPC */
2563 if (halt_status & HALT_STATUS_UNRECOVERABLE)
2564 set_bit(DPC_HA_UNRECOVERABLE,
2565 &ha->dpc_flags);
2566 else {
2567 ql4_printk(KERN_INFO, ha, "%s: detect "
2568 "abort needed!\n", __func__);
2569 set_bit(DPC_RESET_HA, &ha->dpc_flags);
2570 }
2571 qla4xxx_mailbox_premature_completion(ha);
2572 qla4xxx_wake_dpc(ha);
2573 }
2574 } 2580 }
2575 } 2581 }
2576} 2582}
@@ -2652,11 +2658,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
2652 if (!pci_channel_offline(ha->pdev)) 2658 if (!pci_channel_offline(ha->pdev))
2653 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); 2659 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
2654 2660
2655 if (is_qla8022(ha)) { 2661 if (is_qla80XX(ha))
2656 qla4_8xxx_watchdog(ha); 2662 qla4_8xxx_watchdog(ha);
2657 }
2658 2663
2659 if (!is_qla8022(ha)) { 2664 if (is_qla40XX(ha)) {
2660 /* Check for heartbeat interval. */ 2665 /* Check for heartbeat interval. */
2661 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && 2666 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
2662 ha->heartbeat_interval != 0) { 2667 ha->heartbeat_interval != 0) {
@@ -2941,6 +2946,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2941 2946
2942 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 2947 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
2943 2948
2949 if (is_qla8032(ha) &&
2950 !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2951 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
2952 __func__);
2953 /* disable pause frame for ISP83xx */
2954 qla4_83xx_disable_pause(ha);
2955 }
2956
2944 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); 2957 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2945 2958
2946 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) 2959 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
@@ -2953,9 +2966,9 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2953 goto recover_ha_init_adapter; 2966 goto recover_ha_init_adapter;
2954 } 2967 }
2955 2968
2956 /* For the ISP-82xx adapter, issue a stop_firmware if invoked 2969 /* For the ISP-8xxx adapter, issue a stop_firmware if invoked
2957 * from eh_host_reset or ioctl module */ 2970 * from eh_host_reset or ioctl module */
2958 if (is_qla8022(ha) && !reset_chip && 2971 if (is_qla80XX(ha) && !reset_chip &&
2959 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { 2972 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
2960 2973
2961 DEBUG2(ql4_printk(KERN_INFO, ha, 2974 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2978,13 +2991,13 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
2978 } 2991 }
2979 2992
2980 /* Issue full chip reset if recovering from a catastrophic error, 2993 /* Issue full chip reset if recovering from a catastrophic error,
2981 * or if stop_firmware fails for ISP-82xx. 2994 * or if stop_firmware fails for ISP-8xxx.
2982 * This is the default case for ISP-4xxx */ 2995 * This is the default case for ISP-4xxx */
2983 if (!is_qla8022(ha) || reset_chip) { 2996 if (is_qla40XX(ha) || reset_chip) {
2984 if (!is_qla8022(ha)) 2997 if (is_qla40XX(ha))
2985 goto chip_reset; 2998 goto chip_reset;
2986 2999
2987 /* Check if 82XX firmware is alive or not 3000 /* Check if 8XXX firmware is alive or not
2988 * We may have arrived here from NEED_RESET 3001 * We may have arrived here from NEED_RESET
2989 * detection only */ 3002 * detection only */
2990 if (test_bit(AF_FW_RECOVERY, &ha->flags)) 3003 if (test_bit(AF_FW_RECOVERY, &ha->flags))
@@ -3000,10 +3013,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
3000 set_current_state(TASK_UNINTERRUPTIBLE); 3013 set_current_state(TASK_UNINTERRUPTIBLE);
3001 schedule_timeout(HZ); 3014 schedule_timeout(HZ);
3002 } 3015 }
3003 3016chip_reset:
3004 if (!test_bit(AF_FW_RECOVERY, &ha->flags)) 3017 if (!test_bit(AF_FW_RECOVERY, &ha->flags))
3005 qla4xxx_cmd_wait(ha); 3018 qla4xxx_cmd_wait(ha);
3006chip_reset: 3019
3007 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 3020 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
3008 qla4xxx_abort_active_cmds(ha, DID_RESET << 16); 3021 qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
3009 DEBUG2(ql4_printk(KERN_INFO, ha, 3022 DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3021,7 +3034,7 @@ recover_ha_init_adapter:
3021 /* For ISP-4xxx, force function 1 to always initialize 3034 /* For ISP-4xxx, force function 1 to always initialize
3022 * before function 3 to prevent both functions from 3035 * before function 3 to prevent both functions from
3023 * stepping on top of the other */ 3036 * stepping on top of the other */
3024 if (!is_qla8022(ha) && (ha->mac_index == 3)) 3037 if (is_qla40XX(ha) && (ha->mac_index == 3))
3025 ssleep(6); 3038 ssleep(6);
3026 3039
3027 /* NOTE: AF_ONLINE flag set upon successful completion of 3040 /* NOTE: AF_ONLINE flag set upon successful completion of
@@ -3039,11 +3052,12 @@ recover_ha_init_adapter:
3039 * Since we don't want to block the DPC for too long 3052 * Since we don't want to block the DPC for too long
3040 * with multiple resets in the same thread, 3053 * with multiple resets in the same thread,
3041 * utilize DPC to retry */ 3054 * utilize DPC to retry */
3042 if (is_qla8022(ha)) { 3055 if (is_qla80XX(ha)) {
3043 qla4_8xxx_idc_lock(ha); 3056 ha->isp_ops->idc_lock(ha);
3044 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3057 dev_state = qla4_8xxx_rd_direct(ha,
3045 qla4_8xxx_idc_unlock(ha); 3058 QLA8XXX_CRB_DEV_STATE);
3046 if (dev_state == QLA82XX_DEV_FAILED) { 3059 ha->isp_ops->idc_unlock(ha);
3060 if (dev_state == QLA8XXX_DEV_FAILED) {
3047 ql4_printk(KERN_INFO, ha, "%s: don't retry " 3061 ql4_printk(KERN_INFO, ha, "%s: don't retry "
3048 "recover adapter. H/W is in Failed " 3062 "recover adapter. H/W is in Failed "
3049 "state\n", __func__); 3063 "state\n", __func__);
@@ -3168,6 +3182,7 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3168 struct iscsi_session *sess; 3182 struct iscsi_session *sess;
3169 struct ddb_entry *ddb_entry; 3183 struct ddb_entry *ddb_entry;
3170 struct scsi_qla_host *ha; 3184 struct scsi_qla_host *ha;
3185 int status = QLA_SUCCESS;
3171 3186
3172 sess = cls_session->dd_data; 3187 sess = cls_session->dd_data;
3173 ddb_entry = sess->dd_data; 3188 ddb_entry = sess->dd_data;
@@ -3175,11 +3190,20 @@ int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
3175 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" 3190 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
3176 " unblock user space session\n", ha->host_no, __func__, 3191 " unblock user space session\n", ha->host_no, __func__,
3177 ddb_entry->fw_ddb_index); 3192 ddb_entry->fw_ddb_index);
3178 iscsi_conn_start(ddb_entry->conn);
3179 iscsi_conn_login_event(ddb_entry->conn,
3180 ISCSI_CONN_STATE_LOGGED_IN);
3181 3193
3182 return QLA_SUCCESS; 3194 if (!iscsi_is_session_online(cls_session)) {
3195 iscsi_conn_start(ddb_entry->conn);
3196 iscsi_conn_login_event(ddb_entry->conn,
3197 ISCSI_CONN_STATE_LOGGED_IN);
3198 } else {
3199 ql4_printk(KERN_INFO, ha,
3200 "scsi%ld: %s: ddb[%d] session [%d] already logged in\n",
3201 ha->host_no, __func__, ddb_entry->fw_ddb_index,
3202 cls_session->sid);
3203 status = QLA_ERROR;
3204 }
3205
3206 return status;
3183} 3207}
3184 3208
3185static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) 3209static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
@@ -3373,15 +3397,26 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3373 /* post events to application */ 3397 /* post events to application */
3374 qla4xxx_do_work(ha); 3398 qla4xxx_do_work(ha);
3375 3399
3376 if (is_qla8022(ha)) { 3400 if (is_qla80XX(ha)) {
3377 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 3401 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
3378 qla4_8xxx_idc_lock(ha); 3402 if (is_qla8032(ha)) {
3379 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 3403 ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
3380 QLA82XX_DEV_FAILED); 3404 __func__);
3381 qla4_8xxx_idc_unlock(ha); 3405 /* disable pause frame for ISP83xx */
3406 qla4_83xx_disable_pause(ha);
3407 }
3408
3409 ha->isp_ops->idc_lock(ha);
3410 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
3411 QLA8XXX_DEV_FAILED);
3412 ha->isp_ops->idc_unlock(ha);
3382 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n"); 3413 ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
3383 qla4_8xxx_device_state_handler(ha); 3414 qla4_8xxx_device_state_handler(ha);
3384 } 3415 }
3416
3417 if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
3418 qla4_83xx_post_idc_ack(ha);
3419
3385 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { 3420 if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
3386 qla4_8xxx_need_qsnt_handler(ha); 3421 qla4_8xxx_need_qsnt_handler(ha);
3387 } 3422 }
@@ -3391,7 +3426,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
3391 (test_bit(DPC_RESET_HA, &ha->dpc_flags) || 3426 (test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
3392 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || 3427 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
3393 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { 3428 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
3394 if (ql4xdontresethba) { 3429 if ((is_qla8022(ha) && ql4xdontresethba) ||
3430 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
3395 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 3431 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
3396 ha->host_no, __func__)); 3432 ha->host_no, __func__));
3397 clear_bit(DPC_RESET_HA, &ha->dpc_flags); 3433 clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3477,6 +3513,18 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3477 ha->isp_ops->disable_intrs(ha); 3513 ha->isp_ops->disable_intrs(ha);
3478 } 3514 }
3479 3515
3516 if (is_qla40XX(ha)) {
3517 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
3518 &ha->reg->ctrl_status);
3519 readl(&ha->reg->ctrl_status);
3520 } else if (is_qla8022(ha)) {
3521 writel(0, &ha->qla4_82xx_reg->host_int);
3522 readl(&ha->qla4_82xx_reg->host_int);
3523 } else if (is_qla8032(ha)) {
3524 writel(0, &ha->qla4_83xx_reg->risc_intr);
3525 readl(&ha->qla4_83xx_reg->risc_intr);
3526 }
3527
3480 /* Remove timer thread, if present */ 3528 /* Remove timer thread, if present */
3481 if (ha->timer_active) 3529 if (ha->timer_active)
3482 qla4xxx_stop_timer(ha); 3530 qla4xxx_stop_timer(ha);
@@ -3492,10 +3540,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3492 /* Put firmware in known state */ 3540 /* Put firmware in known state */
3493 ha->isp_ops->reset_firmware(ha); 3541 ha->isp_ops->reset_firmware(ha);
3494 3542
3495 if (is_qla8022(ha)) { 3543 if (is_qla80XX(ha)) {
3496 qla4_8xxx_idc_lock(ha); 3544 ha->isp_ops->idc_lock(ha);
3497 qla4_8xxx_clear_drv_active(ha); 3545 qla4_8xxx_clear_drv_active(ha);
3498 qla4_8xxx_idc_unlock(ha); 3546 ha->isp_ops->idc_unlock(ha);
3499 } 3547 }
3500 3548
3501 /* Detach interrupts */ 3549 /* Detach interrupts */
@@ -3542,16 +3590,20 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3542 /* Mapping of IO base pointer, door bell read and write pointer */ 3590 /* Mapping of IO base pointer, door bell read and write pointer */
3543 3591
3544 /* mapping of IO base pointer */ 3592 /* mapping of IO base pointer */
3545 ha->qla4_8xxx_reg = 3593 if (is_qla8022(ha)) {
3546 (struct device_reg_82xx __iomem *)((uint8_t *)ha->nx_pcibase + 3594 ha->qla4_82xx_reg = (struct device_reg_82xx __iomem *)
3547 0xbc000 + (ha->pdev->devfn << 11)); 3595 ((uint8_t *)ha->nx_pcibase + 0xbc000 +
3596 (ha->pdev->devfn << 11));
3597 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3598 QLA82XX_CAM_RAM_DB2);
3599 } else if (is_qla8032(ha)) {
3600 ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
3601 ((uint8_t *)ha->nx_pcibase);
3602 }
3548 3603
3549 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ 3604 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
3550 db_len = pci_resource_len(pdev, 4); 3605 db_len = pci_resource_len(pdev, 4);
3551 3606
3552 ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
3553 QLA82XX_CAM_RAM_DB2);
3554
3555 return 0; 3607 return 0;
3556iospace_error_exit: 3608iospace_error_exit:
3557 return -ENOMEM; 3609 return -ENOMEM;
@@ -3639,23 +3691,64 @@ static struct isp_operations qla4xxx_isp_ops = {
3639 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, 3691 .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out,
3640 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, 3692 .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in,
3641 .get_sys_info = qla4xxx_get_sys_info, 3693 .get_sys_info = qla4xxx_get_sys_info,
3694 .queue_mailbox_command = qla4xxx_queue_mbox_cmd,
3695 .process_mailbox_interrupt = qla4xxx_process_mbox_intr,
3642}; 3696};
3643 3697
3644static struct isp_operations qla4_8xxx_isp_ops = { 3698static struct isp_operations qla4_82xx_isp_ops = {
3645 .iospace_config = qla4_8xxx_iospace_config, 3699 .iospace_config = qla4_8xxx_iospace_config,
3646 .pci_config = qla4_8xxx_pci_config, 3700 .pci_config = qla4_8xxx_pci_config,
3647 .disable_intrs = qla4_8xxx_disable_intrs, 3701 .disable_intrs = qla4_82xx_disable_intrs,
3648 .enable_intrs = qla4_8xxx_enable_intrs, 3702 .enable_intrs = qla4_82xx_enable_intrs,
3649 .start_firmware = qla4_8xxx_load_risc, 3703 .start_firmware = qla4_8xxx_load_risc,
3650 .intr_handler = qla4_8xxx_intr_handler, 3704 .restart_firmware = qla4_82xx_try_start_fw,
3651 .interrupt_service_routine = qla4_8xxx_interrupt_service_routine, 3705 .intr_handler = qla4_82xx_intr_handler,
3652 .reset_chip = qla4_8xxx_isp_reset, 3706 .interrupt_service_routine = qla4_82xx_interrupt_service_routine,
3707 .need_reset = qla4_8xxx_need_reset,
3708 .reset_chip = qla4_82xx_isp_reset,
3653 .reset_firmware = qla4_8xxx_stop_firmware, 3709 .reset_firmware = qla4_8xxx_stop_firmware,
3654 .queue_iocb = qla4_8xxx_queue_iocb, 3710 .queue_iocb = qla4_82xx_queue_iocb,
3655 .complete_iocb = qla4_8xxx_complete_iocb, 3711 .complete_iocb = qla4_82xx_complete_iocb,
3656 .rd_shdw_req_q_out = qla4_8xxx_rd_shdw_req_q_out, 3712 .rd_shdw_req_q_out = qla4_82xx_rd_shdw_req_q_out,
3657 .rd_shdw_rsp_q_in = qla4_8xxx_rd_shdw_rsp_q_in, 3713 .rd_shdw_rsp_q_in = qla4_82xx_rd_shdw_rsp_q_in,
3658 .get_sys_info = qla4_8xxx_get_sys_info, 3714 .get_sys_info = qla4_8xxx_get_sys_info,
3715 .rd_reg_direct = qla4_82xx_rd_32,
3716 .wr_reg_direct = qla4_82xx_wr_32,
3717 .rd_reg_indirect = qla4_82xx_md_rd_32,
3718 .wr_reg_indirect = qla4_82xx_md_wr_32,
3719 .idc_lock = qla4_82xx_idc_lock,
3720 .idc_unlock = qla4_82xx_idc_unlock,
3721 .rom_lock_recovery = qla4_82xx_rom_lock_recovery,
3722 .queue_mailbox_command = qla4_82xx_queue_mbox_cmd,
3723 .process_mailbox_interrupt = qla4_82xx_process_mbox_intr,
3724};
3725
3726static struct isp_operations qla4_83xx_isp_ops = {
3727 .iospace_config = qla4_8xxx_iospace_config,
3728 .pci_config = qla4_8xxx_pci_config,
3729 .disable_intrs = qla4_83xx_disable_intrs,
3730 .enable_intrs = qla4_83xx_enable_intrs,
3731 .start_firmware = qla4_8xxx_load_risc,
3732 .restart_firmware = qla4_83xx_start_firmware,
3733 .intr_handler = qla4_83xx_intr_handler,
3734 .interrupt_service_routine = qla4_83xx_interrupt_service_routine,
3735 .need_reset = qla4_8xxx_need_reset,
3736 .reset_chip = qla4_83xx_isp_reset,
3737 .reset_firmware = qla4_8xxx_stop_firmware,
3738 .queue_iocb = qla4_83xx_queue_iocb,
3739 .complete_iocb = qla4_83xx_complete_iocb,
3740 .rd_shdw_req_q_out = qla4_83xx_rd_shdw_req_q_out,
3741 .rd_shdw_rsp_q_in = qla4_83xx_rd_shdw_rsp_q_in,
3742 .get_sys_info = qla4_8xxx_get_sys_info,
3743 .rd_reg_direct = qla4_83xx_rd_reg,
3744 .wr_reg_direct = qla4_83xx_wr_reg,
3745 .rd_reg_indirect = qla4_83xx_rd_reg_indirect,
3746 .wr_reg_indirect = qla4_83xx_wr_reg_indirect,
3747 .idc_lock = qla4_83xx_drv_lock,
3748 .idc_unlock = qla4_83xx_drv_unlock,
3749 .rom_lock_recovery = qla4_83xx_rom_lock_recovery,
3750 .queue_mailbox_command = qla4_83xx_queue_mbox_cmd,
3751 .process_mailbox_interrupt = qla4_83xx_process_mbox_intr,
3659}; 3752};
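
Splitting the old qla4_8xxx_isp_ops into separate 82xx and 83xx tables keeps the shared paths chip-agnostic: instead of is_qla8022()/is_qla8032() branches, callers dispatch through ha->isp_ops, as the later hunks in this file do. A hypothetical caller, for illustration only:

	static void example_set_dev_state(struct scsi_qla_host *ha,
					  uint32_t state)
	{
		/* The same sequence works on both chips; the table chosen
		 * at probe time supplies the right IDC lock and register
		 * I/O (82xx hardware lock vs. 83xx driver lock). */
		ha->isp_ops->idc_lock(ha);
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, state);
		ha->isp_ops->idc_unlock(ha);
	}
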
3660 3753
3661uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 3754uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
@@ -3663,9 +3756,14 @@ uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3663 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out); 3756 return (uint16_t)le32_to_cpu(ha->shadow_regs->req_q_out);
3664} 3757}
3665 3758
3666uint16_t qla4_8xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha) 3759uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3667{ 3760{
3668 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->req_q_out)); 3761 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
3762}
3763
3764uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
3765{
3766 return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
3669} 3767}
3670 3768
3671uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 3769uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
@@ -3673,9 +3771,14 @@ uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3673 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); 3771 return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
3674} 3772}
3675 3773
3676uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) 3774uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3677{ 3775{
3678 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in)); 3776 return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
3777}
3778
3779uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
3780{
3781 return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
3679} 3782}
3680 3783
3681static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) 3784static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
@@ -5050,30 +5153,36 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5050 ha->pdev = pdev; 5153 ha->pdev = pdev;
5051 ha->host = host; 5154 ha->host = host;
5052 ha->host_no = host->host_no; 5155 ha->host_no = host->host_no;
5156 ha->func_num = PCI_FUNC(ha->pdev->devfn);
5053 5157
5054 pci_enable_pcie_error_reporting(pdev); 5158 pci_enable_pcie_error_reporting(pdev);
5055 5159
5056 /* Setup Runtime configurable options */ 5160 /* Setup Runtime configurable options */
5057 if (is_qla8022(ha)) { 5161 if (is_qla8022(ha)) {
5058 ha->isp_ops = &qla4_8xxx_isp_ops; 5162 ha->isp_ops = &qla4_82xx_isp_ops;
5059 rwlock_init(&ha->hw_lock); 5163 ha->reg_tbl = (uint32_t *) qla4_82xx_reg_tbl;
5060 ha->qdr_sn_window = -1; 5164 ha->qdr_sn_window = -1;
5061 ha->ddr_mn_window = -1; 5165 ha->ddr_mn_window = -1;
5062 ha->curr_window = 255; 5166 ha->curr_window = 255;
5063 ha->func_num = PCI_FUNC(ha->pdev->devfn);
5064 nx_legacy_intr = &legacy_intr[ha->func_num]; 5167 nx_legacy_intr = &legacy_intr[ha->func_num];
5065 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; 5168 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
5066 ha->nx_legacy_intr.tgt_status_reg = 5169 ha->nx_legacy_intr.tgt_status_reg =
5067 nx_legacy_intr->tgt_status_reg; 5170 nx_legacy_intr->tgt_status_reg;
5068 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; 5171 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
5069 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; 5172 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
5173 } else if (is_qla8032(ha)) {
5174 ha->isp_ops = &qla4_83xx_isp_ops;
5175 ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
5070 } else { 5176 } else {
5071 ha->isp_ops = &qla4xxx_isp_ops; 5177 ha->isp_ops = &qla4xxx_isp_ops;
5072 } 5178 }
5073 5179
5074 /* Set EEH reset type to fundamental if required by hba */ 5180 if (is_qla80XX(ha)) {
5075 if (is_qla8022(ha)) 5181 rwlock_init(&ha->hw_lock);
5182 ha->pf_bit = ha->func_num << 16;
5183 /* Set EEH reset type to fundamental if required by hba */
5076 pdev->needs_freset = 1; 5184 pdev->needs_freset = 1;
5185 }
5077 5186
5078 /* Configure PCI I/O space. */ 5187 /* Configure PCI I/O space. */
5079 ret = ha->isp_ops->iospace_config(ha); 5188 ret = ha->isp_ops->iospace_config(ha);
@@ -5094,6 +5203,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5094 init_completion(&ha->disable_acb_comp); 5203 init_completion(&ha->disable_acb_comp);
5095 5204
5096 spin_lock_init(&ha->hardware_lock); 5205 spin_lock_init(&ha->hardware_lock);
5206 spin_lock_init(&ha->work_lock);
5097 5207
5098 /* Initialize work list */ 5208 /* Initialize work list */
5099 INIT_LIST_HEAD(&ha->work_list); 5209 INIT_LIST_HEAD(&ha->work_list);
@@ -5128,8 +5238,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5128 if (ret) 5238 if (ret)
5129 goto probe_failed; 5239 goto probe_failed;
5130 5240
5131 if (is_qla8022(ha)) 5241 if (is_qla80XX(ha))
5132 (void) qla4_8xxx_get_flash_info(ha); 5242 qla4_8xxx_get_flash_info(ha);
5243
5244 if (is_qla8032(ha)) {
5245 qla4_83xx_read_reset_template(ha);
5246 /*
5247 * NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
5248 * If DONRESET_BIT0 is set, drivers should not set dev_state
5249 * to NEED_RESET. But if NEED_RESET is set, drivers should
5250 * should honor the reset.
5251 */
5252 if (ql4xdontresethba == 1)
5253 qla4_83xx_set_idc_dontreset(ha);
5254 }
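
The DONTRESET_BIT0 handshake runs both ways: this driver sets the bit when loaded with ql4xdontresethba=1, and honors it when another function's driver set it (see the qla4_83xx_idc_dontreset() checks elsewhere in this patch). A sketch of what such a check could look like; the IDC control register name and bit position are assumed here, with the real definitions living in ql4_83xx.h:

	static int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
	{
		uint32_t idc_ctrl;

		/* QLA83XX_IDC_DRV_CTRL and DONTRESET_BIT0 are assumed names */
		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
		return idc_ctrl & DONTRESET_BIT0;
	}
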
5133 5255
5134 /* 5256 /*
5135 * Initialize the Host adapter request/response queues and 5257 * Initialize the Host adapter request/response queues and
@@ -5137,14 +5259,20 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5137 * NOTE: interrupts enabled upon successful completion 5259 * NOTE: interrupts enabled upon successful completion
5138 */ 5260 */
5139 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 5261 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5262
5263 /* Don't retry adapter initialization if IRQ allocation failed */
5264 if (!test_bit(AF_IRQ_ATTACHED, &ha->flags))
5265 goto skip_retry_init;
5266
5140 while ((!test_bit(AF_ONLINE, &ha->flags)) && 5267 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
5141 init_retry_count++ < MAX_INIT_RETRIES) { 5268 init_retry_count++ < MAX_INIT_RETRIES) {
5142 5269
5143 if (is_qla8022(ha)) { 5270 if (is_qla80XX(ha)) {
5144 qla4_8xxx_idc_lock(ha); 5271 ha->isp_ops->idc_lock(ha);
5145 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 5272 dev_state = qla4_8xxx_rd_direct(ha,
5146 qla4_8xxx_idc_unlock(ha); 5273 QLA82XX_CRB_DEV_STATE);
5147 if (dev_state == QLA82XX_DEV_FAILED) { 5274 ha->isp_ops->idc_unlock(ha);
5275 if (dev_state == QLA8XXX_DEV_FAILED) {
5148 ql4_printk(KERN_WARNING, ha, "%s: don't retry " 5276 ql4_printk(KERN_WARNING, ha, "%s: don't retry "
5149 "initialize adapter. H/W is in failed state\n", 5277 "initialize adapter. H/W is in failed state\n",
5150 __func__); 5278 __func__);
@@ -5160,16 +5288,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5160 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); 5288 status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
5161 } 5289 }
5162 5290
5291skip_retry_init:
5163 if (!test_bit(AF_ONLINE, &ha->flags)) { 5292 if (!test_bit(AF_ONLINE, &ha->flags)) {
5164 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); 5293 ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
5165 5294
5166 if (is_qla8022(ha) && ql4xdontresethba) { 5295 if ((is_qla8022(ha) && ql4xdontresethba) ||
5296 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
5167 /* Put the device in failed state. */ 5297 /* Put the device in failed state. */
5168 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); 5298 DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
5169 qla4_8xxx_idc_lock(ha); 5299 ha->isp_ops->idc_lock(ha);
5170 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 5300 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
5171 QLA82XX_DEV_FAILED); 5301 QLA8XXX_DEV_FAILED);
5172 qla4_8xxx_idc_unlock(ha); 5302 ha->isp_ops->idc_unlock(ha);
5173 } 5303 }
5174 ret = -ENODEV; 5304 ret = -ENODEV;
5175 goto remove_host; 5305 goto remove_host;
@@ -5195,12 +5325,13 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5195 goto remove_host; 5325 goto remove_host;
5196 } 5326 }
5197 5327
5198 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc 5328 /*
5329 * For ISP-8XXX, request_irqs is called in qla4_8xxx_load_risc
5199 * (which is called indirectly by qla4xxx_initialize_adapter), 5330 * (which is called indirectly by qla4xxx_initialize_adapter),
5200 * so that irqs will be registered after crbinit but before 5331 * so that irqs will be registered after crbinit but before
5201 * mbx_intr_enable. 5332 * mbx_intr_enable.
5202 */ 5333 */
5203 if (!is_qla8022(ha)) { 5334 if (is_qla40XX(ha)) {
5204 ret = qla4xxx_request_irqs(ha); 5335 ret = qla4xxx_request_irqs(ha);
5205 if (ret) { 5336 if (ret) {
5206 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 5337 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
@@ -5226,6 +5357,10 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5226 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 5357 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
5227 ha->patch_number, ha->build_number); 5358 ha->patch_number, ha->build_number);
5228 5359
5360 /* Set the driver version */
5361 if (is_qla80XX(ha))
5362 qla4_8xxx_set_param(ha, SET_DRVR_VERSION);
5363
5229 if (qla4xxx_setup_boot_info(ha)) 5364 if (qla4xxx_setup_boot_info(ha))
5230 ql4_printk(KERN_ERR, ha, 5365 ql4_printk(KERN_ERR, ha,
5231 "%s: No iSCSI boot target configured\n", __func__); 5366 "%s: No iSCSI boot target configured\n", __func__);
@@ -5333,9 +5468,16 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5333{ 5468{
5334 struct scsi_qla_host *ha; 5469 struct scsi_qla_host *ha;
5335 5470
5471 /*
5472 * If the PCI device is disabled then it means probe_adapter had
5473 * failed and resources were already cleaned up on probe_adapter exit.
5474 */
5475 if (!pci_is_enabled(pdev))
5476 return;
5477
5336 ha = pci_get_drvdata(pdev); 5478 ha = pci_get_drvdata(pdev);
5337 5479
5338 if (!is_qla8022(ha)) 5480 if (is_qla40XX(ha))
5339 qla4xxx_prevent_other_port_reinit(ha); 5481 qla4xxx_prevent_other_port_reinit(ha);
5340 5482
5341 /* destroy iface from sysfs */ 5483 /* destroy iface from sysfs */
@@ -5755,7 +5897,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5755 5897
5756 ha = to_qla_host(cmd->device->host); 5898 ha = to_qla_host(cmd->device->host);
5757 5899
5758 if (ql4xdontresethba) { 5900 if (is_qla8032(ha) && ql4xdontresethba)
5901 qla4_83xx_set_idc_dontreset(ha);
5902
5903 /*
5904 * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
5905 * protocol drivers, we should not set device_state to
5906 * NEED_RESET.
5907 */
5908 if (ql4xdontresethba ||
5909 (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
5759 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 5910 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
5760 ha->host_no, __func__)); 5911 ha->host_no, __func__));
5761 5912
@@ -5779,7 +5930,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
5779 } 5930 }
5780 5931
5781 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 5932 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5782 if (is_qla8022(ha)) 5933 if (is_qla80XX(ha))
5783 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); 5934 set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
5784 else 5935 else
5785 set_bit(DPC_RESET_HA, &ha->dpc_flags); 5936 set_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -5874,7 +6025,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
5874 break; 6025 break;
5875 case SCSI_FIRMWARE_RESET: 6026 case SCSI_FIRMWARE_RESET:
5876 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { 6027 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
5877 if (is_qla8022(ha)) 6028 if (is_qla80XX(ha))
5878 /* set firmware context reset */ 6029 /* set firmware context reset */
5879 set_bit(DPC_RESET_HA_FW_CONTEXT, 6030 set_bit(DPC_RESET_HA_FW_CONTEXT,
5880 &ha->dpc_flags); 6031 &ha->dpc_flags);
@@ -6013,32 +6164,43 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6013 "0x%x is the owner\n", ha->host_no, __func__, 6164 "0x%x is the owner\n", ha->host_no, __func__,
6014 ha->pdev->devfn); 6165 ha->pdev->devfn);
6015 6166
6016 qla4_8xxx_idc_lock(ha); 6167 ha->isp_ops->idc_lock(ha);
6017 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6168 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6018 QLA82XX_DEV_COLD); 6169 QLA8XXX_DEV_COLD);
6019 6170 ha->isp_ops->idc_unlock(ha);
6020 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, 6171
6021 QLA82XX_IDC_VERSION); 6172 rval = qla4_8xxx_update_idc_reg(ha);
6173 if (rval == QLA_ERROR) {
6174 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: FAILED\n",
6175 ha->host_no, __func__);
6176 ha->isp_ops->idc_lock(ha);
6177 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6178 QLA8XXX_DEV_FAILED);
6179 ha->isp_ops->idc_unlock(ha);
6180 goto exit_error_recovery;
6181 }
6022 6182
6023 qla4_8xxx_idc_unlock(ha);
6024 clear_bit(AF_FW_RECOVERY, &ha->flags); 6183 clear_bit(AF_FW_RECOVERY, &ha->flags);
6025 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 6184 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6026 qla4_8xxx_idc_lock(ha);
6027 6185
6028 if (rval != QLA_SUCCESS) { 6186 if (rval != QLA_SUCCESS) {
6029 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 6187 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6030 "FAILED\n", ha->host_no, __func__); 6188 "FAILED\n", ha->host_no, __func__);
6189 ha->isp_ops->idc_lock(ha);
6031 qla4_8xxx_clear_drv_active(ha); 6190 qla4_8xxx_clear_drv_active(ha);
6032 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6191 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6033 QLA82XX_DEV_FAILED); 6192 QLA8XXX_DEV_FAILED);
6193 ha->isp_ops->idc_unlock(ha);
6034 } else { 6194 } else {
6035 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " 6195 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "
6036 "READY\n", ha->host_no, __func__); 6196 "READY\n", ha->host_no, __func__);
6037 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 6197 ha->isp_ops->idc_lock(ha);
6038 QLA82XX_DEV_READY); 6198 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
6199 QLA8XXX_DEV_READY);
6039 /* Clear driver state register */ 6200 /* Clear driver state register */
6040 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); 6201 qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
6041 qla4_8xxx_set_drv_active(ha); 6202 qla4_8xxx_set_drv_active(ha);
6203 ha->isp_ops->idc_unlock(ha);
6042 ret = qla4xxx_request_irqs(ha); 6204 ret = qla4xxx_request_irqs(ha);
6043 if (ret) { 6205 if (ret) {
6044 ql4_printk(KERN_WARNING, ha, "Failed to " 6206 ql4_printk(KERN_WARNING, ha, "Failed to "
@@ -6050,13 +6212,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6050 rval = QLA_SUCCESS; 6212 rval = QLA_SUCCESS;
6051 } 6213 }
6052 } 6214 }
6053 qla4_8xxx_idc_unlock(ha);
6054 } else { 6215 } else {
6055 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not " 6216 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
6056 "the reset owner\n", ha->host_no, __func__, 6217 "the reset owner\n", ha->host_no, __func__,
6057 ha->pdev->devfn); 6218 ha->pdev->devfn);
6058 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 6219 if ((qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE) ==
6059 QLA82XX_DEV_READY)) { 6220 QLA8XXX_DEV_READY)) {
6060 clear_bit(AF_FW_RECOVERY, &ha->flags); 6221 clear_bit(AF_FW_RECOVERY, &ha->flags);
6061 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); 6222 rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
6062 if (rval == QLA_SUCCESS) { 6223 if (rval == QLA_SUCCESS) {
@@ -6071,11 +6232,12 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
6071 rval = QLA_SUCCESS; 6232 rval = QLA_SUCCESS;
6072 } 6233 }
6073 } 6234 }
6074 qla4_8xxx_idc_lock(ha); 6235 ha->isp_ops->idc_lock(ha);
6075 qla4_8xxx_set_drv_active(ha); 6236 qla4_8xxx_set_drv_active(ha);
6076 qla4_8xxx_idc_unlock(ha); 6237 ha->isp_ops->idc_unlock(ha);
6077 } 6238 }
6078 } 6239 }
6240exit_error_recovery:
6079 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); 6241 clear_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
6080 return rval; 6242 return rval;
6081} 6243}
@@ -6114,7 +6276,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev)
6114 6276
6115 ha->isp_ops->disable_intrs(ha); 6277 ha->isp_ops->disable_intrs(ha);
6116 6278
6117 if (is_qla8022(ha)) { 6279 if (is_qla80XX(ha)) {
6118 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { 6280 if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
6119 ret = PCI_ERS_RESULT_RECOVERED; 6281 ret = PCI_ERS_RESULT_RECOVERED;
6120 goto exit_slot_reset; 6282 goto exit_slot_reset;
@@ -6180,6 +6342,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
6180 .subvendor = PCI_ANY_ID, 6342 .subvendor = PCI_ANY_ID,
6181 .subdevice = PCI_ANY_ID, 6343 .subdevice = PCI_ANY_ID,
6182 }, 6344 },
6345 {
6346 .vendor = PCI_VENDOR_ID_QLOGIC,
6347 .device = PCI_DEVICE_ID_QLOGIC_ISP8324,
6348 .subvendor = PCI_ANY_ID,
6349 .subdevice = PCI_ANY_ID,
6350 },
6183 {0, 0}, 6351 {0, 0},
6184}; 6352};
6185MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); 6353MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 725034f4252..f6df2ea91ab 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
1/* 1/*
2 * QLogic iSCSI HBA Driver 2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2003-2010 QLogic Corporation 3 * Copyright (c) 2003-2012 QLogic Corporation
4 * 4 *
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k18" 8#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 182d5a57ab7..57fbd5a3d4e 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -109,6 +109,7 @@ static const char * scsi_debug_version_date = "20100324";
109#define DEF_OPT_BLKS 64 109#define DEF_OPT_BLKS 64
110#define DEF_PHYSBLK_EXP 0 110#define DEF_PHYSBLK_EXP 0
111#define DEF_PTYPE 0 111#define DEF_PTYPE 0
112#define DEF_REMOVABLE false
112#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ 113#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
113#define DEF_SECTOR_SIZE 512 114#define DEF_SECTOR_SIZE 512
114#define DEF_UNMAP_ALIGNMENT 0 115#define DEF_UNMAP_ALIGNMENT 0
@@ -193,11 +194,11 @@ static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
193static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 194static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
194static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; 195static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
195static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; 196static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197static bool scsi_debug_removable = DEF_REMOVABLE;
196 198
197static int scsi_debug_cmnd_count = 0; 199static int scsi_debug_cmnd_count = 0;
198 200
199#define DEV_READONLY(TGT) (0) 201#define DEV_READONLY(TGT) (0)
200#define DEV_REMOVEABLE(TGT) (0)
201 202
202static unsigned int sdebug_store_sectors; 203static unsigned int sdebug_store_sectors;
203static sector_t sdebug_capacity; /* in sectors */ 204static sector_t sdebug_capacity; /* in sectors */
@@ -919,7 +920,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
919 return ret; 920 return ret;
920 } 921 }
921 /* drops through here for a standard inquiry */ 922 /* drops through here for a standard inquiry */
922 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */ 923 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
923 arr[2] = scsi_debug_scsi_level; 924 arr[2] = scsi_debug_scsi_level;
924 arr[3] = 2; /* response_data_format==2 */ 925 arr[3] = 2; /* response_data_format==2 */
925 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 926 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
@@ -1211,7 +1212,7 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1211 p[11] = sdebug_sectors_per & 0xff; 1212 p[11] = sdebug_sectors_per & 0xff;
1212 p[12] = (scsi_debug_sector_size >> 8) & 0xff; 1213 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1213 p[13] = scsi_debug_sector_size & 0xff; 1214 p[13] = scsi_debug_sector_size & 0xff;
1214 if (DEV_REMOVEABLE(target)) 1215 if (scsi_debug_removable)
1215 p[20] |= 0x20; /* should agree with INQUIRY */ 1216 p[20] |= 0x20; /* should agree with INQUIRY */
1216 if (1 == pcontrol) 1217 if (1 == pcontrol)
1217 memset(p + 2, 0, sizeof(format_pg) - 2); 1218 memset(p + 2, 0, sizeof(format_pg) - 2);
@@ -2754,6 +2755,7 @@ module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2754module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); 2755module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2755module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); 2756module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2756module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); 2757module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2758module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2757module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); 2759module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2758module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); 2760module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2759module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); 2761module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
@@ -2796,6 +2798,7 @@ MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
2796MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); 2798MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2797MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 2799MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2798MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 2800MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2801MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2799MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 2802MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2800MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 2803MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2801MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 2804MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
@@ -3205,6 +3208,25 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3205} 3208}
3206DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL); 3209DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3207 3210
3211static ssize_t sdebug_removable_show(struct device_driver *ddp,
3212 char *buf)
3213{
3214 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3215}
3216static ssize_t sdebug_removable_store(struct device_driver *ddp,
3217 const char *buf, size_t count)
3218{
3219 int n;
3220
3221 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3222 scsi_debug_removable = (n > 0);
3223 return count;
3224 }
3225 return -EINVAL;
3226}
3227DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
3228 sdebug_removable_store);
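
With the module parameter and the sysfs attribute both wired up, the flag can be set at load time (e.g. modprobe scsi_debug removable=1) or toggled at runtime through /sys/bus/pseudo/drivers/scsi_debug/removable; a subsequent INQUIRY from the emulated disk then reports the RMB bit accordingly, as resp_inquiry() above shows.
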
3229
3208 3230
3209/* Note: The following function creates attribute files in the 3231/* Note: The following function creates attribute files in the
3210 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these 3232 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -3230,6 +3252,7 @@ static int do_create_driverfs_files(void)
3230 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 3252 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3231 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); 3253 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3232 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); 3254 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3255 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
3233 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 3256 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3234 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); 3257 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3235 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); 3258 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
@@ -3255,6 +3278,7 @@ static void do_remove_driverfs_files(void)
3255 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); 3278 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3256 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); 3279 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3257 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); 3280 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3281 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
3258 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); 3282 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3259 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts); 3283 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3260 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld); 3284 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
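
Note: the scsi_debug change above follows a common pairing: a module parameter makes the new removable flag settable at load time, while a matching driver attribute exposes the same variable read/write under /sys/bus/pseudo/drivers/scsi_debug. A minimal sketch of that pattern, using a hypothetical `example` knob rather than anything from this patch:

/* Sketch of the module_param + DRIVER_ATTR pairing used above;
 * "example" and example_flag are illustrative names, not scsi_debug code. */
#include <linux/module.h>
#include <linux/device.h>

static bool example_flag;
module_param_named(example, example_flag, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(example, "demonstration flag (def=0)");

static ssize_t example_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", example_flag ? 1 : 0);
}

static ssize_t example_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	/* accept any non-negative integer; nonzero means "on" */
	if (count > 0 && sscanf(buf, "%d", &n) == 1 && n >= 0) {
		example_flag = (n > 0);
		return count;
	}
	return -EINVAL;
}
DRIVER_ATTR(example, S_IRUGO | S_IWUSR, example_show, example_store);
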
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index cf8dfab9489..43fca9170bf 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -172,6 +172,7 @@ static struct {
 	{"HITACHI", "DF400", "*", BLIST_REPORTLUN2},
 	{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
 	{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+	{"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
 	{"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
 	{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
 	{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index faa790fba13..da36a3a81a9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2473,7 +2473,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
 	 * Try to transition the scsi device to SDEV_RUNNING or one of the
 	 * offlined states and goose the device queue if successful.
 	 */
-	if (sdev->sdev_state == SDEV_BLOCK)
+	if ((sdev->sdev_state == SDEV_BLOCK) ||
+	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
 		sdev->sdev_state = new_state;
 	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
 		if (new_state == SDEV_TRANSPORT_OFFLINE ||
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index d947ffc20ce..3e58b2245f1 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -921,6 +921,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
 	if (*bflags & BLIST_RETRY_HWERROR)
 		sdev->retry_hwerror = 1;
 
+	if (*bflags & BLIST_NO_DIF)
+		sdev->no_dif = 1;
+
 	transport_configure_device(&sdev->sdev_gendev);
 
 	if (sdev->host->hostt->slave_configure) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 093d4f6a54d..ce5224c92ed 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1031,33 +1031,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
 void scsi_remove_target(struct device *dev)
 {
 	struct Scsi_Host *shost = dev_to_shost(dev->parent);
-	struct scsi_target *starget, *found;
+	struct scsi_target *starget, *last = NULL;
 	unsigned long flags;
 
- restart:
-	found = NULL;
+	/* remove targets being careful to lookup next entry before
+	 * deleting the last
+	 */
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(starget, &shost->__targets, siblings) {
 		if (starget->state == STARGET_DEL)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
-			found = starget;
-			found->reap_ref++;
-			break;
+			/* assuming new targets arrive at the end */
+			starget->reap_ref++;
+			spin_unlock_irqrestore(shost->host_lock, flags);
+			if (last)
+				scsi_target_reap(last);
+			last = starget;
+			__scsi_remove_target(starget);
+			spin_lock_irqsave(shost->host_lock, flags);
 		}
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
-	if (found) {
-		__scsi_remove_target(found);
-		scsi_target_reap(found);
-		/* in the case where @dev has multiple starget children,
-		 * continue removing.
-		 *
-		 * FIXME: does such a case exist?
-		 */
-		goto restart;
-	}
+	if (last)
+		scsi_target_reap(last);
 }
 EXPORT_SYMBOL(scsi_remove_target);
 
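
Note: the rewritten scsi_remove_target() pins each matching target with reap_ref before dropping the host lock, does the blocking removal unlocked, and reaps the previous target only after its successor has been pinned, so the list is walked in a single pass instead of restarting from the head after every removal. A standalone userspace sketch of that deferred-release iteration, with illustrative names (entry, reap, blocking_remove) rather than kernel API:

/* Drop the lock around blocking per-entry work; release the previous
 * entry only once the next one is pinned, so the walk never touches a
 * freed node. All names here are hypothetical. */
#include <pthread.h>
#include <stddef.h>

struct entry { struct entry *next; int refs; };

extern void blocking_remove(struct entry *e);	/* may sleep */
extern void reap(struct entry *e);		/* final release */

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

void remove_all(struct entry *head)
{
	struct entry *e, *last = NULL;

	pthread_mutex_lock(&list_lock);
	for (e = head; e; e = e->next) {
		e->refs++;			/* pin before dropping lock */
		pthread_mutex_unlock(&list_lock);
		if (last)
			reap(last);		/* safe: successor is pinned */
		last = e;
		blocking_remove(e);
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
	if (last)
		reap(last);			/* release the final entry */
}
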
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4df73e52a4f..12f6fdfc114 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -262,6 +262,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
+sd_store_protection_type(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	unsigned int val;
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	err = kstrtouint(buf, 10, &val);
+
+	if (err)
+		return err;
+
+	if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
+		sdkp->protection_type = val;
+
+	return count;
+}
+
+static ssize_t
 sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
@@ -381,7 +403,8 @@ static struct device_attribute sd_disk_attrs[] = {
 	       sd_store_allow_restart),
 	__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
 	       sd_store_manage_start_stop),
-	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+	__ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type,
+	       sd_store_protection_type),
 	__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
 	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
 	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
@@ -804,9 +827,8 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 		SCpnt->cmnd[0] = WRITE_6;
 		SCpnt->sc_data_direction = DMA_TO_DEVICE;
 
-		if (blk_integrity_rq(rq) &&
-		    sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
-			goto out;
+		if (blk_integrity_rq(rq))
+			sd_dif_prepare(rq, block, sdp->sector_size);
 
 	} else if (rq_data_dir(rq) == READ) {
 		SCpnt->cmnd[0] = READ_6;
@@ -1671,34 +1693,42 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 /*
  * Determine whether disk supports Data Integrity Field.
  */
-static void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
+static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
 {
 	struct scsi_device *sdp = sdkp->device;
 	u8 type;
+	int ret = 0;
 
 	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
-		return;
+		return ret;
 
 	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
 
-	if (type == sdkp->protection_type || !sdkp->first_scan)
-		return;
+	if (type > SD_DIF_TYPE3_PROTECTION)
+		ret = -ENODEV;
+	else if (scsi_host_dif_capable(sdp->host, type))
+		ret = 1;
+
+	if (sdkp->first_scan || type != sdkp->protection_type)
+		switch (ret) {
+		case -ENODEV:
+			sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
+				  " protection type %u. Disabling disk!\n",
+				  type);
+			break;
+		case 1:
+			sd_printk(KERN_NOTICE, sdkp,
+				  "Enabling DIF Type %u protection\n", type);
+			break;
+		case 0:
+			sd_printk(KERN_NOTICE, sdkp,
+				  "Disabling DIF Type %u protection\n", type);
+			break;
+		}
 
 	sdkp->protection_type = type;
 
-	if (type > SD_DIF_TYPE3_PROTECTION) {
-		sd_printk(KERN_ERR, sdkp, "formatted with unsupported " \
-			  "protection type %u. Disabling disk!\n", type);
-		sdkp->capacity = 0;
-		return;
-	}
-
-	if (scsi_host_dif_capable(sdp->host, type))
-		sd_printk(KERN_NOTICE, sdkp,
-			  "Enabling DIF Type %u protection\n", type);
-	else
-		sd_printk(KERN_NOTICE, sdkp,
-			  "Disabling DIF Type %u protection\n", type);
+	return ret;
 }
 
 static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
@@ -1794,7 +1824,10 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 	sector_size = get_unaligned_be32(&buffer[8]);
 	lba = get_unaligned_be64(&buffer[0]);
 
-	sd_read_protection_type(sdkp, buffer);
+	if (sd_read_protection_type(sdkp, buffer) < 0) {
+		sdkp->capacity = 0;
+		return -ENODEV;
+	}
 
 	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
@@ -2632,7 +2665,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 	}
 
 	add_disk(gd);
-	sd_dif_config_host(sdkp);
+	if (sdkp->capacity)
+		sd_dif_config_host(sdkp);
 
 	sd_revalidate_disk(gd);
 
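
Note: sd_read_protection_type() now reports back to its caller instead of zeroing the capacity itself: a negative value means the disk is formatted with an unknown protection type (read_capacity_16() then clears the capacity and fails with -ENODEV), 0 means DIF stays off, and 1 means DIF is enabled. A standalone sketch of that tri-state contract, with simplified inputs standing in for the READ CAPACITY(16) buffer fields:

/* Tri-state return: <0 = unsupported type (caller disables the disk),
 * 0 = DIF off, 1 = DIF on. Inputs are simplified stand-ins. */
#include <errno.h>
#include <stdio.h>

#define DIF_TYPE_MAX 3

static int read_protection_type(unsigned char prot_en, unsigned char p_type,
				int host_supports)
{
	unsigned int type;

	if (!prot_en)
		return 0;		/* not formatted with PI */

	type = (p_type & 7) + 1;	/* P_TYPE 0 = Type 1 */
	if (type > DIF_TYPE_MAX)
		return -ENODEV;		/* unsupported: disable the disk */

	return host_supports ? 1 : 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       read_protection_type(0, 0, 1),	/* 0: no PI on the medium */
	       read_protection_type(1, 0, 1),	/* 1: Type 1, host capable */
	       read_protection_type(1, 7, 1));	/* -19: bogus type */
	return 0;
}
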
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index f703f4827b6..47c52a6d733 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -156,7 +156,7 @@ struct sd_dif_tuple {
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 
 extern void sd_dif_config_host(struct scsi_disk *);
-extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
+extern void sd_dif_prepare(struct request *rq, sector_t, unsigned int);
 extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index e52d5bc42bc..04998f36e50 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -366,7 +366,8 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
  *
  * Type 3 does not have a reference tag so no remapping is required.
  */
-int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
+void sd_dif_prepare(struct request *rq, sector_t hw_sector,
+		    unsigned int sector_sz)
 {
 	const int tuple_sz = sizeof(struct sd_dif_tuple);
 	struct bio *bio;
@@ -378,7 +379,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 	sdkp = rq->bio->bi_bdev->bd_disk->private_data;
 
 	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
-		return 0;
+		return;
 
 	phys = hw_sector & 0xffffffff;
 
@@ -397,10 +398,9 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 
 		for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
 
-			if (be32_to_cpu(sdt->ref_tag) != virt)
-				goto error;
+			if (be32_to_cpu(sdt->ref_tag) == virt)
+				sdt->ref_tag = cpu_to_be32(phys);
 
-			sdt->ref_tag = cpu_to_be32(phys);
 			virt++;
 			phys++;
 		}
@@ -410,16 +410,6 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 
 		bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
 	}
-
-	return 0;
-
-error:
-	kunmap_atomic(sdt);
-	sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
-		  __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
-		  be16_to_cpu(sdt->app_tag));
-
-	return -EILSEQ;
 }
 
 /*
@@ -463,10 +453,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 			return;
 		}
 
-		if (be32_to_cpu(sdt->ref_tag) != phys &&
-		    sdt->app_tag != 0xffff)
-			sdt->ref_tag = 0xffffffff; /* Bad ref */
-		else
+		if (be32_to_cpu(sdt->ref_tag) == phys)
 			sdt->ref_tag = cpu_to_be32(virt);
 
 		virt++;
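
Note: with the sd_dif.c change, a stale reference tag is no longer grounds for failing the request in the driver: sd_dif_prepare() and sd_dif_complete() now remap only the tuples whose ref tag matches the expected virtual LBA and leave everything else untouched for the target to check. A standalone sketch of that remap loop (tuple layout simplified; htonl/ntohl stand in for cpu_to_be32/be32_to_cpu):

/* Remap matching ref tags from virtual to physical LBAs; leave
 * mismatches alone so the target's own PI check can flag them. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct dif_tuple {
	uint16_t guard;
	uint16_t app;
	uint32_t ref;	/* big-endian on the wire */
};

static void remap_ref_tags(struct dif_tuple *sdt, unsigned int n,
			   uint32_t virt, uint32_t phys)
{
	for (unsigned int i = 0; i < n; i++, sdt++, virt++, phys++)
		if (ntohl(sdt->ref) == virt)	/* expected tag: remap */
			sdt->ref = htonl(phys);	/* anything else: skip */
}

int main(void)
{
	struct dif_tuple t[2] = {
		{ 0, 0, htonl(100) },	/* matches virt 100: remapped */
		{ 0, 0, htonl(999) },	/* stale: left for the target */
	};

	remap_ref_tags(t, 2, 100, 5000);
	printf("%u %u\n", ntohl(t[0].ref), ntohl(t[1].ref)); /* 5000 999 */
	return 0;
}
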
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e41998cb098..98156a97c47 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -37,6 +37,7 @@ static const char *verstr = "20101219";
 #include <linux/blkdev.h>
 #include <linux/moduleparam.h>
 #include <linux/cdev.h>
+#include <linux/idr.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 
@@ -74,17 +75,14 @@ static const char *verstr = "20101219";
 #include "st_options.h"
 #include "st.h"
 
-static DEFINE_MUTEX(st_mutex);
 static int buffer_kbs;
 static int max_sg_segs;
 static int try_direct_io = TRY_DIRECT_IO;
 static int try_rdio = 1;
 static int try_wdio = 1;
 
-static int st_dev_max;
-static int st_nr_dev;
-
-static struct class *st_sysfs_class;
+static struct class st_sysfs_class;
+static struct device_attribute st_dev_attrs[];
 
 MODULE_AUTHOR("Kai Makisara");
 MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -173,13 +171,9 @@ static int debugging = DEBUG;
 	24 bits) */
 #define SET_DENS_AND_BLK 0x10001
 
-static DEFINE_RWLOCK(st_dev_arr_lock);
-
 static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE;
 static int st_max_sg_segs = ST_MAX_SG;
 
-static struct scsi_tape **scsi_tapes = NULL;
-
 static int modes_defined;
 
 static int enlarge_buffer(struct st_buffer *, int, int);
@@ -198,7 +192,6 @@ static int st_remove(struct device *);
 
 static int do_create_sysfs_files(void);
 static void do_remove_sysfs_files(void);
-static int do_create_class_files(struct scsi_tape *, int, int);
 
 static struct scsi_driver st_template = {
 	.owner = THIS_MODULE,
@@ -221,6 +214,10 @@ static void scsi_tape_release(struct kref *);
 #define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
 
 static DEFINE_MUTEX(st_ref_mutex);
+static DEFINE_SPINLOCK(st_index_lock);
+static DEFINE_SPINLOCK(st_use_lock);
+static DEFINE_IDR(st_index_idr);
+
 
 
 #include "osst_detect.h"
@@ -238,10 +235,9 @@ static struct scsi_tape *scsi_tape_get(int dev)
 	struct scsi_tape *STp = NULL;
 
 	mutex_lock(&st_ref_mutex);
-	write_lock(&st_dev_arr_lock);
+	spin_lock(&st_index_lock);
 
-	if (dev < st_dev_max && scsi_tapes != NULL)
-		STp = scsi_tapes[dev];
+	STp = idr_find(&st_index_idr, dev);
 	if (!STp) goto out;
 
 	kref_get(&STp->kref);
@@ -258,7 +254,7 @@ out_put:
 	kref_put(&STp->kref, scsi_tape_release);
 	STp = NULL;
 out:
-	write_unlock(&st_dev_arr_lock);
+	spin_unlock(&st_index_lock);
 	mutex_unlock(&st_ref_mutex);
 	return STp;
 }
@@ -1188,7 +1184,6 @@ static int st_open(struct inode *inode, struct file *filp)
 	int dev = TAPE_NR(inode);
 	char *name;
 
-	mutex_lock(&st_mutex);
 	/*
 	 * We really want to do nonseekable_open(inode, filp); here, but some
 	 * versions of tar incorrectly call lseek on tapes and bail out if that
@@ -1197,24 +1192,22 @@ static int st_open(struct inode *inode, struct file *filp)
 	filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
 
 	if (!(STp = scsi_tape_get(dev))) {
-		mutex_unlock(&st_mutex);
 		return -ENXIO;
 	}
 
-	write_lock(&st_dev_arr_lock);
 	filp->private_data = STp;
 	name = tape_name(STp);
 
+	spin_lock(&st_use_lock);
 	if (STp->in_use) {
-		write_unlock(&st_dev_arr_lock);
+		spin_unlock(&st_use_lock);
 		scsi_tape_put(STp);
-		mutex_unlock(&st_mutex);
 		DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
 		return (-EBUSY);
 	}
 
 	STp->in_use = 1;
-	write_unlock(&st_dev_arr_lock);
+	spin_unlock(&st_use_lock);
 	STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
 
 	if (scsi_autopm_get_device(STp->device) < 0) {
@@ -1262,16 +1255,16 @@ static int st_open(struct inode *inode, struct file *filp)
 		retval = (-EIO);
 		goto err_out;
 	}
-	mutex_unlock(&st_mutex);
 	return 0;
 
  err_out:
 	normalize_buffer(STp->buffer);
+	spin_lock(&st_use_lock);
 	STp->in_use = 0;
+	spin_unlock(&st_use_lock);
 	scsi_tape_put(STp);
 	if (resumed)
 		scsi_autopm_put_device(STp->device);
-	mutex_unlock(&st_mutex);
 	return retval;
 
 }
@@ -1403,9 +1396,9 @@ static int st_release(struct inode *inode, struct file *filp)
 	do_door_lock(STp, 0);
 
 	normalize_buffer(STp->buffer);
-	write_lock(&st_dev_arr_lock);
+	spin_lock(&st_use_lock);
 	STp->in_use = 0;
-	write_unlock(&st_dev_arr_lock);
+	spin_unlock(&st_use_lock);
 	scsi_autopm_put_device(STp->device);
 	scsi_tape_put(STp);
 
@@ -3992,16 +3985,98 @@
 	.llseek =	noop_llseek,
 };
 
+static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
+{
+	int i, error;
+	dev_t cdev_devno;
+	struct cdev *cdev;
+	struct device *dev;
+	struct st_modedef *STm = &(tape->modes[mode]);
+	char name[10];
+	int dev_num = tape->index;
+
+	cdev_devno = MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, rew));
+
+	cdev = cdev_alloc();
+	if (!cdev) {
+		pr_err("st%d: out of memory. Device not attached.\n", dev_num);
+		error = -ENOMEM;
+		goto out;
+	}
+	cdev->owner = THIS_MODULE;
+	cdev->ops = &st_fops;
+
+	error = cdev_add(cdev, cdev_devno, 1);
+	if (error) {
+		pr_err("st%d: Can't add %s-rewind mode %d\n", dev_num,
+		       rew ? "non" : "auto", mode);
+		pr_err("st%d: Device not attached.\n", dev_num);
+		goto out_free;
+	}
+	STm->cdevs[rew] = cdev;
+
+	i = mode << (4 - ST_NBR_MODE_BITS);
+	snprintf(name, 10, "%s%s%s", rew ? "n" : "",
+		 tape->disk->disk_name, st_formats[i]);
+
+	dev = device_create(&st_sysfs_class, &tape->device->sdev_gendev,
+			    cdev_devno, &tape->modes[mode], "%s", name);
+	if (IS_ERR(dev)) {
+		pr_err("st%d: device_create failed\n", dev_num);
+		error = PTR_ERR(dev);
+		goto out_free;
+	}
+
+	STm->devs[rew] = dev;
+
+	return 0;
+out_free:
+	cdev_del(STm->cdevs[rew]);
+	STm->cdevs[rew] = NULL;
+out:
+	return error;
+}
+
+static int create_cdevs(struct scsi_tape *tape)
+{
+	int mode, error;
+	for (mode = 0; mode < ST_NBR_MODES; ++mode) {
+		error = create_one_cdev(tape, mode, 0);
+		if (error)
+			return error;
+		error = create_one_cdev(tape, mode, 1);
+		if (error)
+			return error;
+	}
+
+	return sysfs_create_link(&tape->device->sdev_gendev.kobj,
+				 &tape->modes[0].devs[0]->kobj, "tape");
+}
+
+static void remove_cdevs(struct scsi_tape *tape)
+{
+	int mode, rew;
+	sysfs_remove_link(&tape->device->sdev_gendev.kobj, "tape");
+	for (mode = 0; mode < ST_NBR_MODES; mode++) {
+		struct st_modedef *STm = &(tape->modes[mode]);
+		for (rew = 0; rew < 2; rew++) {
+			if (STm->cdevs[rew])
+				cdev_del(STm->cdevs[rew]);
+			if (STm->devs[rew])
+				device_unregister(STm->devs[rew]);
+		}
+	}
+}
+
 static int st_probe(struct device *dev)
 {
 	struct scsi_device *SDp = to_scsi_device(dev);
 	struct gendisk *disk = NULL;
-	struct cdev *cdev = NULL;
 	struct scsi_tape *tpnt = NULL;
 	struct st_modedef *STm;
 	struct st_partstat *STps;
 	struct st_buffer *buffer;
-	int i, j, mode, dev_num, error;
+	int i, dev_num, error;
 	char *stp;
 
 	if (SDp->type != TYPE_TAPE)
@@ -4028,58 +4103,16 @@ static int st_probe(struct device *dev)
 		goto out_buffer_free;
 	}
 
-	write_lock(&st_dev_arr_lock);
-	if (st_nr_dev >= st_dev_max) {
-		struct scsi_tape **tmp_da;
-		int tmp_dev_max;
-
-		tmp_dev_max = max(st_nr_dev * 2, 8);
-		if (tmp_dev_max > ST_MAX_TAPES)
-			tmp_dev_max = ST_MAX_TAPES;
-		if (tmp_dev_max <= st_nr_dev) {
-			write_unlock(&st_dev_arr_lock);
-			printk(KERN_ERR "st: Too many tape devices (max. %d).\n",
-			       ST_MAX_TAPES);
-			goto out_put_disk;
-		}
-
-		tmp_da = kzalloc(tmp_dev_max * sizeof(struct scsi_tape *), GFP_ATOMIC);
-		if (tmp_da == NULL) {
-			write_unlock(&st_dev_arr_lock);
-			printk(KERN_ERR "st: Can't extend device array.\n");
-			goto out_put_disk;
-		}
-
-		if (scsi_tapes != NULL) {
-			memcpy(tmp_da, scsi_tapes,
-			       st_dev_max * sizeof(struct scsi_tape *));
-			kfree(scsi_tapes);
-		}
-		scsi_tapes = tmp_da;
-
-		st_dev_max = tmp_dev_max;
-	}
-
-	for (i = 0; i < st_dev_max; i++)
-		if (scsi_tapes[i] == NULL)
-			break;
-	if (i >= st_dev_max)
-		panic("scsi_devices corrupt (st)");
-
 	tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
 	if (tpnt == NULL) {
-		write_unlock(&st_dev_arr_lock);
 		printk(KERN_ERR "st: Can't allocate device descriptor.\n");
 		goto out_put_disk;
 	}
 	kref_init(&tpnt->kref);
 	tpnt->disk = disk;
-	sprintf(disk->disk_name, "st%d", i);
 	disk->private_data = &tpnt->driver;
 	disk->queue = SDp->request_queue;
 	tpnt->driver = &st_template;
-	scsi_tapes[i] = tpnt;
-	dev_num = i;
 
 	tpnt->device = SDp;
 	if (SDp->scsi_level <= 2)
@@ -4125,6 +4158,7 @@ static int st_probe(struct device *dev)
 		STm->default_compression = ST_DONT_TOUCH;
 		STm->default_blksize = (-1);  /* No forced size */
 		STm->default_density = (-1);  /* No forced density */
+		STm->tape = tpnt;
 	}
 
 	for (i = 0; i < ST_NBR_PARTITIONS; i++) {
@@ -4144,38 +4178,34 @@ static int st_probe(struct device *dev)
 	tpnt->blksize_changed = 0;
 	mutex_init(&tpnt->lock);
 
-	st_nr_dev++;
-	write_unlock(&st_dev_arr_lock);
+	if (!idr_pre_get(&st_index_idr, GFP_KERNEL)) {
+		pr_warn("st: idr expansion failed\n");
+		error = -ENOMEM;
+		goto out_put_disk;
+	}
 
-	for (mode = 0; mode < ST_NBR_MODES; ++mode) {
-		STm = &(tpnt->modes[mode]);
-		for (j=0; j < 2; j++) {
-			cdev = cdev_alloc();
-			if (!cdev) {
-				printk(KERN_ERR
-				       "st%d: out of memory. Device not attached.\n",
-				       dev_num);
-				goto out_free_tape;
-			}
-			cdev->owner = THIS_MODULE;
-			cdev->ops = &st_fops;
-
-			error = cdev_add(cdev,
-					 MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, j)),
-					 1);
-			if (error) {
-				printk(KERN_ERR "st%d: Can't add %s-rewind mode %d\n",
-				       dev_num, j ? "non" : "auto", mode);
-				printk(KERN_ERR "st%d: Device not attached.\n", dev_num);
-				goto out_free_tape;
-			}
-			STm->cdevs[j] = cdev;
+	spin_lock(&st_index_lock);
+	error = idr_get_new(&st_index_idr, tpnt, &dev_num);
+	spin_unlock(&st_index_lock);
+	if (error) {
+		pr_warn("st: idr allocation failed: %d\n", error);
+		goto out_put_disk;
+	}
 
-		}
-		error = do_create_class_files(tpnt, dev_num, mode);
-		if (error)
-			goto out_free_tape;
+	if (dev_num > ST_MAX_TAPES) {
+		pr_err("st: Too many tape devices (max. %d).\n", ST_MAX_TAPES);
+		goto out_put_index;
 	}
+
+	tpnt->index = dev_num;
+	sprintf(disk->disk_name, "st%d", dev_num);
+
+	dev_set_drvdata(dev, tpnt);
+
+
+	error = create_cdevs(tpnt);
+	if (error)
+		goto out_remove_devs;
 	scsi_autopm_put_device(SDp);
 
 	sdev_printk(KERN_NOTICE, SDp,
@@ -4186,28 +4216,12 @@ static int st_probe(struct device *dev)
 
 	return 0;
 
-out_free_tape:
-	for (mode=0; mode < ST_NBR_MODES; mode++) {
-		STm = &(tpnt->modes[mode]);
-		sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
-				  "tape");
-		for (j=0; j < 2; j++) {
-			if (STm->cdevs[j]) {
-				if (cdev == STm->cdevs[j])
-					cdev = NULL;
-				device_destroy(st_sysfs_class,
-					       MKDEV(SCSI_TAPE_MAJOR,
-						     TAPE_MINOR(i, mode, j)));
-				cdev_del(STm->cdevs[j]);
-			}
-		}
-	}
-	if (cdev)
-		cdev_del(cdev);
-	write_lock(&st_dev_arr_lock);
-	scsi_tapes[dev_num] = NULL;
-	st_nr_dev--;
-	write_unlock(&st_dev_arr_lock);
+out_remove_devs:
+	remove_cdevs(tpnt);
+out_put_index:
+	spin_lock(&st_index_lock);
+	idr_remove(&st_index_idr, dev_num);
+	spin_unlock(&st_index_lock);
 out_put_disk:
 	put_disk(disk);
 	kfree(tpnt);
@@ -4220,38 +4234,18 @@ out:
 
 static int st_remove(struct device *dev)
 {
-	struct scsi_device *SDp = to_scsi_device(dev);
-	struct scsi_tape *tpnt;
-	int i, j, mode;
-
-	scsi_autopm_get_device(SDp);
-	write_lock(&st_dev_arr_lock);
-	for (i = 0; i < st_dev_max; i++) {
-		tpnt = scsi_tapes[i];
-		if (tpnt != NULL && tpnt->device == SDp) {
-			scsi_tapes[i] = NULL;
-			st_nr_dev--;
-			write_unlock(&st_dev_arr_lock);
-			sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
-					  "tape");
-			for (mode = 0; mode < ST_NBR_MODES; ++mode) {
-				for (j=0; j < 2; j++) {
-					device_destroy(st_sysfs_class,
-						       MKDEV(SCSI_TAPE_MAJOR,
-							     TAPE_MINOR(i, mode, j)));
-					cdev_del(tpnt->modes[mode].cdevs[j]);
-					tpnt->modes[mode].cdevs[j] = NULL;
-				}
-			}
+	struct scsi_tape *tpnt = dev_get_drvdata(dev);
+	int index = tpnt->index;
 
-			mutex_lock(&st_ref_mutex);
-			kref_put(&tpnt->kref, scsi_tape_release);
-			mutex_unlock(&st_ref_mutex);
-			return 0;
-		}
-	}
+	scsi_autopm_get_device(to_scsi_device(dev));
+	remove_cdevs(tpnt);
 
-	write_unlock(&st_dev_arr_lock);
+	mutex_lock(&st_ref_mutex);
+	kref_put(&tpnt->kref, scsi_tape_release);
+	mutex_unlock(&st_ref_mutex);
+	spin_lock(&st_index_lock);
+	idr_remove(&st_index_idr, index);
+	spin_unlock(&st_index_lock);
 	return 0;
 }
 
@@ -4283,6 +4277,11 @@ static void scsi_tape_release(struct kref *kref)
 	return;
 }
 
+static struct class st_sysfs_class = {
+	.name = "scsi_tape",
+	.dev_attrs = st_dev_attrs,
+};
+
 static int __init init_st(void)
 {
 	int err;
@@ -4292,10 +4291,10 @@ static int __init init_st(void)
 	printk(KERN_INFO "st: Version %s, fixed bufsize %d, s/g segs %d\n",
 	       verstr, st_fixed_buffer_size, st_max_sg_segs);
 
-	st_sysfs_class = class_create(THIS_MODULE, "scsi_tape");
-	if (IS_ERR(st_sysfs_class)) {
-		printk(KERN_ERR "Unable create sysfs class for SCSI tapes\n");
-		return PTR_ERR(st_sysfs_class);
+	err = class_register(&st_sysfs_class);
+	if (err) {
+		pr_err("Unable register sysfs class for SCSI tapes\n");
+		return err;
 	}
 
 	err = register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
@@ -4322,7 +4321,7 @@ err_chrdev:
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
 err_class:
-	class_destroy(st_sysfs_class);
+	class_unregister(&st_sysfs_class);
 	return err;
 }
 
@@ -4332,8 +4331,7 @@ static void __exit exit_st(void)
 	scsi_unregister_driver(&st_template.gendrv);
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
-	class_destroy(st_sysfs_class);
-	kfree(scsi_tapes);
+	class_unregister(&st_sysfs_class);
 	printk(KERN_INFO "st: Unloaded.\n");
 }
 
@@ -4405,10 +4403,9 @@ static void do_remove_sysfs_files(void)
 	driver_remove_file(sysfs, &driver_attr_try_direct_io);
 }
 
-
 /* The sysfs simple class interface */
 static ssize_t
-st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
+defined_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
@@ -4417,10 +4414,9 @@ st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
 	return l;
 }
 
-DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
-
 static ssize_t
-st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
+default_blksize_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
 {
 	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
@@ -4429,10 +4425,10 @@ st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
 	return l;
 }
 
-DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
 
 static ssize_t
-st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
+default_density_show(struct device *dev, struct device_attribute *attr,
+		     char *buf)
 {
 	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
@@ -4443,11 +4439,9 @@ st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
 	return l;
 }
 
-DEVICE_ATTR(default_density, S_IRUGO, st_defdensity_show, NULL);
-
 static ssize_t
-st_defcompression_show(struct device *dev, struct device_attribute *attr,
+default_compression_show(struct device *dev, struct device_attribute *attr,
 		       char *buf)
 {
 	struct st_modedef *STm = dev_get_drvdata(dev);
 	ssize_t l = 0;
@@ -4456,28 +4450,14 @@ st_defcompression_show(struct device *dev, struct device_attribute *attr,
 	return l;
 }
 
-DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
-
 static ssize_t
-st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
+options_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct st_modedef *STm = dev_get_drvdata(dev);
-	struct scsi_tape *STp;
-	int i, j, options;
+	struct scsi_tape *STp = STm->tape;
+	int options;
 	ssize_t l = 0;
 
-	for (i=0; i < st_dev_max; i++) {
-		for (j=0; j < ST_NBR_MODES; j++)
-			if (&scsi_tapes[i]->modes[j] == STm)
-				break;
-		if (j < ST_NBR_MODES)
-			break;
-	}
-	if (i == st_dev_max)
-		return 0; /* should never happen */
-
-	STp = scsi_tapes[i];
-
 	options = STm->do_buffer_writes ? MT_ST_BUFFER_WRITES : 0;
 	options |= STm->do_async_writes ? MT_ST_ASYNC_WRITES : 0;
 	options |= STm->do_read_ahead ? MT_ST_READ_AHEAD : 0;
@@ -4498,66 +4478,14 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
 	return l;
 }
 
-DEVICE_ATTR(options, S_IRUGO, st_options_show, NULL);
-
-static int do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
-{
-	int i, rew, error;
-	char name[10];
-	struct device *st_class_member;
-
-	for (rew=0; rew < 2; rew++) {
-		/* Make sure that the minor numbers corresponding to the four
-		   first modes always get the same names */
-		i = mode << (4 - ST_NBR_MODE_BITS);
-		snprintf(name, 10, "%s%s%s", rew ? "n" : "",
-			 STp->disk->disk_name, st_formats[i]);
-		st_class_member =
-			device_create(st_sysfs_class, &STp->device->sdev_gendev,
-				      MKDEV(SCSI_TAPE_MAJOR,
-					    TAPE_MINOR(dev_num, mode, rew)),
-				      &STp->modes[mode], "%s", name);
-		if (IS_ERR(st_class_member)) {
-			printk(KERN_WARNING "st%d: device_create failed\n",
-			       dev_num);
-			error = PTR_ERR(st_class_member);
-			goto out;
-		}
-
-		error = device_create_file(st_class_member,
-					   &dev_attr_defined);
-		if (error) goto out;
-		error = device_create_file(st_class_member,
-					   &dev_attr_default_blksize);
-		if (error) goto out;
-		error = device_create_file(st_class_member,
-					   &dev_attr_default_density);
-		if (error) goto out;
-		error = device_create_file(st_class_member,
-					   &dev_attr_default_compression);
-		if (error) goto out;
-		error = device_create_file(st_class_member,
-					   &dev_attr_options);
-		if (error) goto out;
-
-		if (mode == 0 && rew == 0) {
-			error = sysfs_create_link(&STp->device->sdev_gendev.kobj,
-						  &st_class_member->kobj,
-						  "tape");
-			if (error) {
-				printk(KERN_ERR
-				       "st%d: Can't create sysfs link from SCSI device.\n",
-				       dev_num);
-				goto out;
-			}
-		}
-	}
-
-	return 0;
-
-out:
-	return error;
-}
+static struct device_attribute st_dev_attrs[] = {
+	__ATTR_RO(defined),
+	__ATTR_RO(default_blksize),
+	__ATTR_RO(default_density),
+	__ATTR_RO(default_compression),
+	__ATTR_RO(options),
+	__ATTR_NULL,
+};
 
 /* The following functions may be useful for a larger audience. */
 static int sgl_map_user_pages(struct st_buffer *STbp,
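
Note: the st rewrite replaces the grow-on-demand scsi_tapes[] array with an idr, so device numbers are allocated and looked up through a radix tree under a plain spinlock. The calls used here are the two-step idr API of this kernel generation (idr_pre_get() preloads memory outside the lock, idr_get_new() hands out the id); later kernels collapse the pair into idr_alloc(). A sketch of the pattern with illustrative example_* names:

/* Sketch of the pre-3.9 idr allocation pattern used in st_probe() above;
 * example_lock/example_idr/example_attach are hypothetical names. */
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

static int example_attach(void *dev, int *out_id)
{
	int err;

	if (!idr_pre_get(&example_idr, GFP_KERNEL))	/* preload outside lock */
		return -ENOMEM;

	spin_lock(&example_lock);
	err = idr_get_new(&example_idr, dev, out_id);	/* id chosen by the idr */
	spin_unlock(&example_lock);
	return err;
}
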
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index b548923785e..f3eee0f9f40 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -66,6 +66,8 @@ struct st_modedef {
 	unsigned char default_compression; /* 0 = don't touch, etc */
 	short default_density; /* Forced density, -1 = no value */
 	int default_blksize;	/* Forced blocksize, -1 = no value */
+	struct scsi_tape *tape;
+	struct device *devs[2];  /* Auto-rewind and non-rewind devices */
 	struct cdev *cdevs[2];  /* Auto-rewind and non-rewind devices */
 };
 
@@ -76,7 +78,7 @@ struct st_modedef {
 #define ST_MODE_SHIFT (7 - ST_NBR_MODE_BITS)
 #define ST_MODE_MASK ((ST_NBR_MODES - 1) << ST_MODE_SHIFT)
 
-#define ST_MAX_TAPES 128
+#define ST_MAX_TAPES (1 << (20 - (ST_NBR_MODE_BITS + 1)))
 #define ST_MAX_TAPE_ENTRIES (ST_MAX_TAPES << (ST_NBR_MODE_BITS + 1))
 
 /* The status related to each partition */
@@ -99,6 +101,7 @@ struct scsi_tape {
 	struct mutex lock;	/* For serialization */
 	struct completion wait;	/* For SCSI commands */
 	struct st_buffer *buffer;
+	int index;
 
 	/* Drive characteristics */
 	unsigned char omit_blklims;
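
Note: the new ST_MAX_TAPES bound comes straight from the minor-number space: char device minors are 20 bits wide, and each tape consumes 2^(ST_NBR_MODE_BITS + 1) minors (one per mode, with and without rewind). With ST_NBR_MODE_BITS = 2 that works out to 1 << 17 tapes, replacing the old hard-coded 128. A quick standalone check of the arithmetic:

/* Verify the ST_MAX_TAPES derivation: minors per tape times max tapes
 * must exactly fill the 20-bit minor space. */
#include <stdio.h>

int main(void)
{
	const int MINORBITS = 20, ST_NBR_MODE_BITS = 2;
	int max_tapes = 1 << (MINORBITS - (ST_NBR_MODE_BITS + 1));
	int max_entries = max_tapes << (ST_NBR_MODE_BITS + 1);

	printf("ST_MAX_TAPES=%d ST_MAX_TAPE_ENTRIES=%d\n",
	       max_tapes, max_entries);	/* 131072 and 1048576 (= 1 << 20) */
	return 0;
}
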
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 464e67c2e77..77eeeda2b6e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -185,6 +185,7 @@ enum {
 	ATA_LFLAG_DISABLED	= (1 << 6), /* link is disabled */
 	ATA_LFLAG_SW_ACTIVITY	= (1 << 7), /* keep activity stats */
 	ATA_LFLAG_NO_LPM	= (1 << 8), /* disable LPM on this link */
+	ATA_LFLAG_RST_ONCE	= (1 << 9), /* limit recovery to one reset */
 
 	/* struct ata_port flags */
 	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
@@ -990,8 +991,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
 			     irq_handler_t irq_handler, unsigned long irq_flags,
 			     struct scsi_host_template *sht);
 extern void ata_host_detach(struct ata_host *host);
-extern void ata_host_init(struct ata_host *, struct device *,
-			  unsigned long, struct ata_port_operations *);
+extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
@@ -1016,6 +1016,17 @@ extern bool ata_link_offline(struct ata_link *link);
 #ifdef CONFIG_PM
 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
 extern void ata_host_resume(struct ata_host *host);
+extern int ata_sas_port_async_suspend(struct ata_port *ap, int *async);
+extern int ata_sas_port_async_resume(struct ata_port *ap, int *async);
+#else
+static inline int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+{
+	return 0;
+}
+static inline int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+{
+	return 0;
+}
 #endif
 extern int ata_ratelimit(void);
 extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
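
Note: the libata.h hunk uses the usual config-gated stub idiom: when CONFIG_PM is off, the new suspend/resume entry points compile down to inline no-ops, so callers in libsas need no #ifdefs of their own. A generic sketch of the idiom (CONFIG_FEATURE and do_thing are illustrative, not kernel symbols):

/* Header-side stub pattern: real declaration when the feature is
 * configured in, inline no-op otherwise, so call sites stay identical. */
#ifdef CONFIG_FEATURE
extern int do_thing(int arg);
#else
static inline int do_thing(int arg)
{
	return 0;	/* feature compiled out: succeed without doing anything */
}
#endif
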
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 8d3c4271938..33880f6f4e5 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -753,6 +753,7 @@
 #define PCI_DEVICE_ID_HP_CISSD		0x3238
 #define PCI_DEVICE_ID_HP_CISSE		0x323a
 #define PCI_DEVICE_ID_HP_CISSF		0x323b
+#define PCI_DEVICE_ID_HP_CISSH		0x323c
 #define PCI_DEVICE_ID_HP_ZX2_IOC	0x4031
 
 #define PCI_VENDOR_ID_PCTECH		0x1042
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ae33706afeb..ef937b56f9b 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -79,7 +79,8 @@ enum phy_event {
 	PHYE_OOB_DONE         = 1,
 	PHYE_OOB_ERROR        = 2,
 	PHYE_SPINUP_HOLD      = 3, /* hot plug SATA, no COMWAKE sent */
-	PHY_NUM_EVENTS        = 4,
+	PHYE_RESUME_TIMEOUT   = 4,
+	PHY_NUM_EVENTS        = 5,
 };
 
 enum discover_event {
@@ -87,8 +88,10 @@ enum discover_event {
 	DISCE_REVALIDATE_DOMAIN = 1,
 	DISCE_PORT_GONE         = 2,
 	DISCE_PROBE             = 3,
-	DISCE_DESTRUCT          = 4,
-	DISC_NUM_EVENTS         = 5,
+	DISCE_SUSPEND           = 4,
+	DISCE_RESUME            = 5,
+	DISCE_DESTRUCT          = 6,
+	DISC_NUM_EVENTS         = 7,
 };
 
 /* ---------- Expander Devices ---------- */
@@ -128,7 +131,7 @@ struct ex_phy {
 	u8 attached_sas_addr[SAS_ADDR_SIZE];
 	u8 attached_phy_id;
 
-	u8 phy_change_count;
+	int phy_change_count;
 	enum routing_attribute routing_attr;
 	u8 virtual:1;
 
@@ -141,7 +144,7 @@ struct ex_phy {
 struct expander_device {
 	struct list_head children;
 
-	u16 ex_change_count;
+	int ex_change_count;
 	u16 max_route_indexes;
 	u8 num_phys;
 
@@ -169,6 +172,7 @@ struct sata_device {
 	enum ata_command_set command_set;
 	struct smp_resp rps_resp; /* report_phy_sata_resp */
 	u8 port_no;	/* port number, if this is a PM (Port) */
+	int pm_result;
 
 	struct ata_port *ap;
 	struct ata_host ata_host;
@@ -182,6 +186,7 @@ struct ssp_device {
 
 enum {
 	SAS_DEV_GONE,
+	SAS_DEV_FOUND, /* device notified to lldd */
 	SAS_DEV_DESTROY,
 	SAS_DEV_EH_PENDING,
 	SAS_DEV_LU_RESET,
@@ -273,6 +278,7 @@ struct asd_sas_port {
 	enum sas_linkrate linkrate;
 
 	struct sas_work work;
+	int suspended;
 
 /* public: */
 	int id;
@@ -321,6 +327,7 @@ struct asd_sas_phy {
 	unsigned long phy_events_pending;
 
 	int error;
+	int suspended;
 
 	struct sas_phy *phy;
 
@@ -687,6 +694,9 @@ struct sas_domain_function_template {
 
 extern int sas_register_ha(struct sas_ha_struct *);
 extern int sas_unregister_ha(struct sas_ha_struct *);
+extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
 
 int sas_set_phy_speed(struct sas_phy *phy,
 		      struct sas_phy_linkrates *rates);
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 2dfbdaa0b34..ff71a565468 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -45,6 +45,8 @@ void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
 void sas_ata_schedule_reset(struct domain_device *dev);
 void sas_ata_wait_eh(struct domain_device *dev);
 void sas_probe_sata(struct asd_sas_port *port);
+void sas_suspend_sata(struct asd_sas_port *port);
+void sas_resume_sata(struct asd_sas_port *port);
 void sas_ata_end_eh(struct ata_port *ap);
 #else
 
@@ -82,6 +84,14 @@ static inline void sas_probe_sata(struct asd_sas_port *port)
 {
 }
 
+static inline void sas_suspend_sata(struct asd_sas_port *port)
+{
+}
+
+static inline void sas_resume_sata(struct asd_sas_port *port)
+{
+}
+
 static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
 {
 	return 0;
diff --git a/include/scsi/scsi_bsg_fc.h b/include/scsi/scsi_bsg_fc.h
index 91a4e4ff9a9..3031b900b08 100644
--- a/include/scsi/scsi_bsg_fc.h
+++ b/include/scsi/scsi_bsg_fc.h
@@ -26,8 +26,6 @@
  * This file intended to be included by both kernel and user space
  */
 
-#include <scsi/scsi.h>
-
 /*
  * FC Transport SGIO v4 BSG Message Support
  */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9895f69294f..88fae8d2015 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -156,6 +156,7 @@ struct scsi_device {
 	unsigned is_visible:1;	/* is the device visible in sysfs */
 	unsigned can_power_off:1; /* Device supports runtime power off */
 	unsigned wce_default_on:1;	/* Cache is ON by default */
+	unsigned no_dif:1;	/* T10 PI (DIF) should be disabled */
 
 	DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
 	struct list_head event_list;	/* asserted events */
@@ -476,6 +477,9 @@ static inline int scsi_device_enclosure(struct scsi_device *sdev)
 
 static inline int scsi_device_protection(struct scsi_device *sdev)
 {
+	if (sdev->no_dif)
+		return 0;
+
 	return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
 }
 
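
Note: taken together, the BLIST_NO_DIF plumbing runs end to end: a devinfo blacklist entry (the HITACHI HUS1530 above) carries the flag, scsi_add_lun() turns it into sdev->no_dif, and scsi_device_protection() then reports the device as unprotected regardless of what INQUIRY byte 5 advertises. A standalone sketch of that override, assuming the usual SCSI_2 == 3 level encoding:

/* Blacklist bit set at scan time wins over the PROTECT bit the device
 * reports in INQUIRY byte 5. Types simplified for illustration. */
#include <stdio.h>

struct dev {
	unsigned int no_dif:1;	/* from a BLIST_NO_DIF entry at scan time */
	int scsi_level;
	unsigned char inq5;	/* INQUIRY byte 5, PROTECT in bit 0 */
};

static int device_protection(const struct dev *d)
{
	if (d->no_dif)
		return 0;	/* blacklist overrides the device's claim */
	return d->scsi_level > 3 && (d->inq5 & 1); /* SCSI_2 == 3 assumed */
}

int main(void)
{
	struct dev claims_pi = { .no_dif = 0, .scsi_level = 5, .inq5 = 1 };
	struct dev blacklisted = { .no_dif = 1, .scsi_level = 5, .inq5 = 1 };

	printf("%d %d\n", device_protection(&claims_pi),
	       device_protection(&blacklisted));	/* 1 0 */
	return 0;
}
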
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index b4ddd3b18b4..cc1f3e786ad 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -30,4 +30,5 @@
 #define BLIST_RETRY_HWERROR	0x400000 /* retry HARDWARE_ERROR */
 #define BLIST_MAX_512		0x800000 /* maximum 512 sector cdb length */
 #define BLIST_ATTACH_PQ3	0x1000000 /* Scan: Attach to PQ3 devices */
+#define BLIST_NO_DIF		0x2000000 /* Disable T10 PI (DIF) */
 #endif
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5f7d5b3b1c6..49084807eb6 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -873,6 +873,9 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
 					       SHOST_DIF_TYPE2_PROTECTION,
 					       SHOST_DIF_TYPE3_PROTECTION };
 
+	if (target_type > SHOST_DIF_TYPE3_PROTECTION)
+		return 0;
+
 	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
 }
 
@@ -884,6 +887,9 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
 					       SHOST_DIX_TYPE2_PROTECTION,
 					       SHOST_DIX_TYPE3_PROTECTION };
 
+	if (target_type > SHOST_DIX_TYPE3_PROTECTION)
+		return 0;
+
 	return shost->prot_capabilities & cap[target_type];
 #endif
 	return 0;
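
Note: the scsi_host.h guards close an out-of-bounds read: both helpers index a four-entry cap[] table with a caller-supplied protection type, which sd.c can now feed values parsed from a disk. A standalone sketch of the bounds check (the 1/2/4 masks mirror the SHOST_DIF_TYPE{1,2,3}_PROTECTION bits):

/* Never index cap[] with an unvalidated type; reject it instead. */
#include <stdio.h>

static unsigned int dif_capable(unsigned int prot_capabilities,
				unsigned int target_type)
{
	static const unsigned int cap[] = { 0, 1, 2, 4 }; /* type 0..3 masks */

	if (target_type >= sizeof(cap) / sizeof(cap[0]))
		return 0;	/* bogus type: refuse, don't read past cap[] */

	return (prot_capabilities & cap[target_type]) ? target_type : 0;
}

int main(void)
{
	printf("%u %u\n", dif_capable(0x7, 1), dif_capable(0x7, 9)); /* 1 0 */
	return 0;
}
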